subr_vmem.c revision 254307
/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * Copyright (c) 2013 EMC Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * From:
 *	$NetBSD: vmem_impl.h,v 1.2 2013/01/29 21:26:24 para Exp $
 *	$NetBSD: subr_vmem.c,v 1.83 2013/03/06 11:20:10 yamt Exp $
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/subr_vmem.c 254307 2013-08-13 22:40:43Z jeff $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/callout.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/condvar.h>
#include <sys/taskqueue.h>
#include <sys/vmem.h>

#include "opt_vm.h"

#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_pageout.h>

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * NBBY)

#define	VMEM_HASHSIZE_MIN	16
#define	VMEM_HASHSIZE_MAX	131072

#define	VMEM_QCACHE_IDX_MAX	16

#define	VMEM_FITMASK	(M_BESTFIT | M_FIRSTFIT)

#define	VMEM_FLAGS						\
    (M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM | M_BESTFIT | M_FIRSTFIT)

#define	BT_FLAGS	(M_NOWAIT | M_WAITOK | M_USE_RESERVE | M_NOVM)

#define	QC_NAME_MAX	16

/*
 * Data structures private to vmem.
 */
MALLOC_DEFINE(M_VMEM, "vmem", "vmem internal structures");

typedef struct vmem_btag bt_t;

TAILQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

struct qcache {
	uma_zone_t	qc_cache;
	vmem_t		*qc_vmem;
	vmem_size_t	qc_size;
	char		qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))

#define	VMEM_NAME_MAX	16
/* vmem arena */
struct vmem {
	struct mtx_padalign	vm_lock;
	struct cv		vm_cv;
	char			vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem)	vm_alllist;
	struct vmem_hashlist	vm_hash0[VMEM_HASHSIZE_MIN];
	struct vmem_freelist	vm_freelist[VMEM_MAXORDER];
	struct vmem_seglist	vm_seglist;
	struct vmem_hashlist	*vm_hashlist;
	vmem_size_t		vm_hashsize;

	/* Constant after init */
	vmem_size_t		vm_qcache_max;
	vmem_size_t		vm_quantum_mask;
	vmem_size_t		vm_import_quantum;
	int			vm_quantum_shift;

	/* Written on alloc/free */
	LIST_HEAD(, vmem_btag)	vm_freetags;
	int			vm_nfreetags;
	int			vm_nbusytag;
	vmem_size_t		vm_inuse;
	vmem_size_t		vm_size;

	/* Used on import. */
	vmem_import_t		*vm_importfn;
	vmem_release_t		*vm_releasefn;
	void			*vm_arg;

	/* Space exhaustion callback. */
	vmem_reclaim_t		*vm_reclaimfn;

	/* quantum cache */
	qcache_t		vm_qcache[VMEM_QCACHE_IDX_MAX];
};

/* boundary tag */
struct vmem_btag {
	TAILQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t	bt_start;
	vmem_size_t	bt_size;
	int		bt_type;
};

#define	BT_TYPE_SPAN		1	/* Allocated from importfn */
#define	BT_TYPE_SPAN_STATIC	2	/* vmem_add() or create. */
#define	BT_TYPE_FREE		3	/* Available space. */
#define	BT_TYPE_BUSY		4	/* Used space. */
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

#if defined(DIAGNOSTIC)
static void vmem_check(vmem_t *);
#endif

static struct callout	vmem_periodic_ch;
static int		vmem_periodic_interval;
static struct task	vmem_periodic_wk;

static struct mtx_padalign vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);

/* ---- misc */
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)

#define	VMEM_LOCK(vm)		mtx_lock(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mtx_trylock(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mtx_unlock(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, name) mtx_init(&vm->vm_lock, (name), NULL, MTX_DEF)
#define	VMEM_LOCK_DESTROY(vm)	mtx_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	mtx_assert(&vm->vm_lock, MA_OWNED);

#define	VMEM_ALIGNUP(addr, align)	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)flsl(size) - 1)
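/*
 * Illustrative sketch (hypothetical values, not part of the original file):
 * the two's-complement identities behind the macros above.  -(align) is a
 * mask with the low bits of the alignment cleared, so -(-(addr) & -(align))
 * rounds addr up to the next multiple of align, and VMEM_CROSS_P() tests
 * whether two addresses fall in different boundary-sized windows.
 */
#if 0
static void
vmem_macro_examples(void)
{

	MPASS(VMEM_ALIGNUP(0x123, 0x100) == 0x200);	/* rounded up */
	MPASS(VMEM_ALIGNUP(0x200, 0x100) == 0x200);	/* already aligned */
	MPASS(VMEM_CROSS_P(0x0fff, 0x1000, 0x1000));	/* straddles 4 KB */
	MPASS(!VMEM_CROSS_P(0x1000, 0x1fff, 0x1000));	/* stays inside */
	MPASS(ORDER2SIZE(3) == 8 && SIZE2ORDER(8) == 3);
}
#endif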
/*
 * Maximum number of boundary tags that may be required to satisfy an
 * allocation.  Two may be required to import.  Another two may be
 * required to clip edges.
 */
#define	BT_MAXALLOC	4

/*
 * Max free limits the number of locally cached boundary tags.  We
 * just want to avoid hitting the zone allocator for every call.
 */
#define	BT_MAXFREE	(BT_MAXALLOC * 8)
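/*
 * Worked example of the BT_MAXALLOC worst case (illustrative, not part of
 * the original file): importing a fresh span consumes one BT_TYPE_SPAN tag
 * plus one BT_TYPE_FREE tag (see vmem_add1()), and vmem_clip() may then
 * allocate one more tag to split off a leading free fragment and another
 * to separate the busy region from a trailing free fragment -- four tags
 * in total.
 */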
/* Allocator for boundary tags. */
static uma_zone_t vmem_bt_zone;

/* boot time arena storage. */
static struct vmem kernel_arena_storage;
static struct vmem kmem_arena_storage;
static struct vmem buffer_arena_storage;
static struct vmem transient_arena_storage;
vmem_t *kernel_arena = &kernel_arena_storage;
vmem_t *kmem_arena = &kmem_arena_storage;
vmem_t *buffer_arena = &buffer_arena_storage;
vmem_t *transient_arena = &transient_arena_storage;

#ifdef DEBUG_MEMGUARD
static struct vmem memguard_arena_storage;
vmem_t *memguard_arena = &memguard_arena_storage;
#endif

/*
 * Fill the vmem's boundary tag cache.  We guarantee that boundary tag
 * allocation will not fail once bt_fill() passes.  To do so we cache
 * at least the maximum possible tag allocations in the arena.
 */
static int
bt_fill(vmem_t *vm, int flags)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);

	/*
	 * Only allow the kmem arena to dip into reserve tags.  It is the
	 * vmem where new tags come from.
	 */
	flags &= BT_FLAGS;
	if (vm != kmem_arena)
		flags &= ~M_USE_RESERVE;

	/*
	 * Loop until we meet the reserve.  To minimize the lock shuffle
	 * and prevent simultaneous fills we first try a NOWAIT regardless
	 * of the caller's flags.  Specify M_NOVM so we don't recurse while
	 * holding a vmem lock.
	 */
	while (vm->vm_nfreetags < BT_MAXALLOC) {
		bt = uma_zalloc(vmem_bt_zone,
		    (flags & M_USE_RESERVE) | M_NOWAIT | M_NOVM);
		if (bt == NULL) {
			VMEM_UNLOCK(vm);
			bt = uma_zalloc(vmem_bt_zone, flags);
			VMEM_LOCK(vm);
			if (bt == NULL && (flags & M_NOWAIT) != 0)
				break;
		}
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
	}

	if (vm->vm_nfreetags < BT_MAXALLOC)
		return ENOMEM;

	return 0;
}
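/*
 * Canonical caller pattern, distilled from vmem_xalloc() and vmem_add()
 * below (a sketch; "example_prepare_tags" is an illustrative name, not
 * part of this file): top up the reserve before any operation that may
 * consume up to BT_MAXALLOC tags, keeping the lock held across the
 * operation that actually consumes them.
 */
#if 0
static int
example_prepare_tags(vmem_t *vm, int flags)
{

	VMEM_ASSERT_LOCKED(vm);
	if (vm->vm_nfreetags < BT_MAXALLOC && bt_fill(vm, flags) != 0)
		return (ENOMEM);
	/* Up to BT_MAXALLOC bt_alloc() calls are now guaranteed to succeed. */
	return (0);
}
#endif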
/*
 * Pop a tag off of the freetag stack.
 */
static bt_t *
bt_alloc(vmem_t *vm)
{
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	bt = LIST_FIRST(&vm->vm_freetags);
	MPASS(bt != NULL);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;

	return bt;
}

/*
 * Trim the per-vmem free list.  Returns with the lock released to
 * avoid allocator recursions.
 */
static void
bt_freetrim(vmem_t *vm, int freelimit)
{
	LIST_HEAD(, vmem_btag) freetags;
	bt_t *bt;

	LIST_INIT(&freetags);
	VMEM_ASSERT_LOCKED(vm);
	while (vm->vm_nfreetags > freelimit) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		LIST_INSERT_HEAD(&freetags, bt, bt_freelist);
	}
	VMEM_UNLOCK(vm);
	while ((bt = LIST_FIRST(&freetags)) != NULL) {
		LIST_REMOVE(bt, bt_freelist);
		uma_zfree(vmem_bt_zone, bt);
	}
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(LIST_FIRST(&vm->vm_freetags) != bt);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
}

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */

static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * For M_FIRSTFIT, return the list on which any block is guaranteed to be
 * large enough for the requested size.  Otherwise, return the list that
 * may contain blocks large enough for the requested size.
 */
static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, int strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	MPASS(size != 0 && qsize != 0);
	MPASS((size & vm->vm_quantum_mask) == 0);

	if (strat == M_FIRSTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* check too large request? */
	}
	MPASS(idx >= 0);
	MPASS(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
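/*
 * Illustrative sketch (hypothetical numbers, not part of the original
 * file): with a 4 KB quantum, a 24 KB request has qsize 6 and hashes to
 * freelist[2], which holds blocks of 4-7 quanta.  A block of 4 or 5
 * quanta would be too small, so M_FIRSTFIT rounds the index up to
 * freelist[3], where every block holds at least 8 quanta and the head
 * entry can be taken without scanning.
 */
#if 0
static void
example_freelist_index(void)
{

	MPASS(SIZE2ORDER(6) == 2);	/* 6 quanta -> freelist[2] (4-7) */
	MPASS(ORDER2SIZE(2) != 6);	/* inexact, so M_FIRSTFIT uses idx 3 */
}
#endif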
/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), 0);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	VMEM_ASSERT_LOCKED(vm);
	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	VMEM_ASSERT_LOCKED(vm);
	MPASS(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	TAILQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
	bt_free(vm, bt);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	TAILQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	TAILQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	MPASS(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}

/* ---- vmem internal functions */

/*
 * Import from the arena into the quantum cache in UMA.
 */
static int
qc_import(void *arg, void **store, int cnt, int flags)
{
	qcache_t *qc;
	vmem_addr_t addr;
	int i;

	qc = arg;
	flags |= M_BESTFIT;
	for (i = 0; i < cnt; i++) {
		if (vmem_xalloc(qc->qc_vmem, qc->qc_size, 0, 0, 0,
		    VMEM_ADDR_MIN, VMEM_ADDR_MAX, flags, &addr) != 0)
			break;
		store[i] = (void *)addr;
		/* Only guarantee one allocation. */
		flags &= ~M_WAITOK;
		flags |= M_NOWAIT;
	}
	return i;
}

/*
 * Release memory from the UMA cache to the arena.
 */
static void
qc_release(void *arg, void **store, int cnt)
{
	qcache_t *qc;
	int i;

	qc = arg;
	for (i = 0; i < cnt; i++)
		vmem_xfree(qc->qc_vmem, (vmem_addr_t)store[i], qc->qc_size);
}

static void
qc_init(vmem_t *vm, vmem_size_t qcache_max)
{
	qcache_t *qc;
	vmem_size_t size;
	int qcache_idx_max;
	int i;

	MPASS((qcache_max & vm->vm_quantum_mask) == 0);
	qcache_idx_max = MIN(qcache_max >> vm->vm_quantum_shift,
	    VMEM_QCACHE_IDX_MAX);
	vm->vm_qcache_max = qcache_idx_max << vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++) {
		qc = &vm->vm_qcache[i];
		size = (i + 1) << vm->vm_quantum_shift;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);
		qc->qc_vmem = vm;
		qc->qc_size = size;
		qc->qc_cache = uma_zcache_create(qc->qc_name, size,
		    NULL, NULL, NULL, NULL, qc_import, qc_release, qc,
		    UMA_ZONE_VM);
		MPASS(qc->qc_cache);
	}
}
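/*
 * Illustrative sizing sketch (hypothetical numbers, not part of the
 * original file): with a 4 KB quantum and a qcache_max of 16 KB, qc_init()
 * above builds four zones covering sizes 4 KB, 8 KB, 12 KB and 16 KB;
 * vmem_alloc() and vmem_free() below map a request to its zone by index.
 */
#if 0
static void
example_qcache_index(void)
{

	/* An 8 KB request selects index (size - 1) >> quantum_shift == 1. */
	MPASS(((8192 - 1) >> 12) == 1);
}
#endif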
static void
qc_destroy(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		uma_zdestroy(vm->vm_qcache[i].qc_cache);
}

static void
qc_drain(vmem_t *vm)
{
	int qcache_idx_max;
	int i;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	for (i = 0; i < qcache_idx_max; i++)
		zone_drain(vm->vm_qcache[i].qc_cache);
}

#ifndef UMA_MD_SMALL_ALLOC

static struct mtx_padalign vmem_bt_lock;

/*
 * vmem_bt_alloc:  Allocate a new page of boundary tags.
 *
 * On architectures with uma_small_alloc there is no recursion; no address
 * space need be allocated to allocate boundary tags.  For the others, we
 * must handle recursion.  Boundary tags are necessary to allocate new
 * boundary tags.
 *
 * UMA guarantees that enough tags are held in reserve to allocate a new
 * page of kva.  We dip into this reserve by specifying M_USE_RESERVE only
 * when allocating the page to hold new boundary tags.  In this way the
 * reserve is automatically filled by the allocation that uses the reserve.
 *
 * We still have to guarantee that the new tags are allocated atomically since
 * many threads may try concurrently.  The bt_lock provides this guarantee.
 * We convert WAITOK allocations to NOWAIT and then handle the blocking here
 * on failure.  It's ok to return NULL for a WAITOK allocation as UMA will
 * loop again after checking to see if we lost the race to allocate.
 *
 * There is a small race between vmem_bt_alloc() returning the page and the
 * zone lock being acquired to add the page to the zone.  For WAITOK
 * allocations we just pause briefly.  NOWAIT may experience a transient
 * failure.  To alleviate this we permit a small number of simultaneous
 * fills to proceed concurrently so NOWAIT is less likely to fail unless
 * we are really out of KVA.
 */
static void *
vmem_bt_alloc(uma_zone_t zone, int bytes, uint8_t *pflag, int wait)
{
	vmem_addr_t addr;

	*pflag = UMA_SLAB_KMEM;

	/*
	 * Single thread boundary tag allocation so that the address space
	 * and memory are added in one atomic operation.
	 */
	mtx_lock(&vmem_bt_lock);
	if (vmem_xalloc(kmem_arena, bytes, 0, 0, 0, VMEM_ADDR_MIN,
	    VMEM_ADDR_MAX, M_NOWAIT | M_NOVM | M_USE_RESERVE | M_BESTFIT,
	    &addr) == 0) {
		if (kmem_back(kmem_object, addr, bytes,
		    M_NOWAIT | M_USE_RESERVE) == 0) {
			mtx_unlock(&vmem_bt_lock);
			return ((void *)addr);
		}
		vmem_xfree(kmem_arena, addr, bytes);
		mtx_unlock(&vmem_bt_lock);
		/*
		 * Out of memory, not address space.  This may not even be
		 * possible due to M_USE_RESERVE page allocation.
		 */
		if (wait & M_WAITOK)
			VM_WAIT;
		return (NULL);
	}
	mtx_unlock(&vmem_bt_lock);
	/*
	 * We're either out of address space or lost a fill race.
	 */
	if (wait & M_WAITOK)
		pause("btalloc", 1);

	return (NULL);
}
#endif
void
vmem_startup(void)
{

	mtx_init(&vmem_list_lock, "vmem list lock", NULL, MTX_DEF);
	vmem_bt_zone = uma_zcreate("vmem btag",
	    sizeof(struct vmem_btag), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_VM);
#ifndef UMA_MD_SMALL_ALLOC
	mtx_init(&vmem_bt_lock, "btag lock", NULL, MTX_DEF);
	uma_prealloc(vmem_bt_zone, BT_MAXALLOC);
	/*
	 * Reserve enough tags to allocate new tags.  We allow multiple
	 * CPUs to attempt to allocate new tags concurrently to limit
	 * false restarts in UMA.
	 */
	uma_zone_reserve(vmem_bt_zone, BT_MAXALLOC * (mp_ncpus + 1) / 2);
	uma_zone_set_allocf(vmem_bt_zone, vmem_bt_alloc);
#endif
}

/* ---- rehash */

static int
vmem_rehash(vmem_t *vm, vmem_size_t newhashsize)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	vmem_size_t oldhashsize;

	MPASS(newhashsize > 0);

	newhashlist = malloc(sizeof(struct vmem_hashlist) * newhashsize,
	    M_VMEM, M_NOWAIT);
	if (newhashlist == NULL)
		return ENOMEM;
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	VMEM_LOCK(vm);
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt);
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != vm->vm_hash0) {
		free(oldhashlist, M_VMEM);
	}

	return 0;
}

static void
vmem_periodic_kick(void *dummy)
{

	taskqueue_enqueue(taskqueue_thread, &vmem_periodic_wk);
}

static void
vmem_periodic(void *unused, int pending)
{
	vmem_t *vm;
	vmem_size_t desired;
	vmem_size_t current;

	mtx_lock(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
#ifdef DIAGNOSTIC
		/* Convenient time to verify vmem state. */
		VMEM_LOCK(vm);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
#endif
		desired = 1 << flsl(vm->vm_nbusytag);
		desired = MIN(MAX(desired, VMEM_HASHSIZE_MIN),
		    VMEM_HASHSIZE_MAX);
		current = vm->vm_hashsize;

		/* Grow in powers of two.  Shrink less aggressively. */
		if (desired >= current * 2 || desired * 4 <= current)
			vmem_rehash(vm, desired);
	}
	mtx_unlock(&vmem_list_lock);

	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
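/*
 * Illustrative resize walk-through (hypothetical numbers, not part of the
 * original file): with 5000 busy tags, desired = 1 << flsl(5000) = 8192.
 * A table of 4096 buckets is doubled (8192 >= 4096 * 2), but an 8192-bucket
 * table is kept until the tag count drops far enough that desired * 4 <=
 * current, which is the hysteresis that makes shrinking less aggressive.
 */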
static void
vmem_start_callout(void *unused)
{

	TASK_INIT(&vmem_periodic_wk, 0, vmem_periodic, NULL);
	vmem_periodic_interval = hz * 10;
	callout_init(&vmem_periodic_ch, CALLOUT_MPSAFE);
	callout_reset(&vmem_periodic_ch, vmem_periodic_interval,
	    vmem_periodic_kick, NULL);
}
SYSINIT(vfs, SI_SUB_CONFIGURE, SI_ORDER_ANY, vmem_start_callout, NULL);

static void
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int type)
{
	bt_t *btspan;
	bt_t *btfree;

	MPASS(type == BT_TYPE_SPAN || type == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm);
	btspan->bt_type = type;
	btspan->bt_start = addr;
	btspan->bt_size = size;
	bt_insseg_tail(vm, btspan);

	btfree = bt_alloc(vm);
	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);

	vm->vm_size += size;
}

static void
vmem_destroy1(vmem_t *vm)
{
	bt_t *bt;

	/*
	 * Drain per-cpu quantum caches.
	 */
	qc_destroy(vm);

	/*
	 * The vmem should now only contain empty segments.
	 */
	VMEM_LOCK(vm);
	MPASS(vm->vm_nbusytag == 0);

	while ((bt = TAILQ_FIRST(&vm->vm_seglist)) != NULL)
		bt_remseg(vm, bt);

	if (vm->vm_hashlist != NULL && vm->vm_hashlist != vm->vm_hash0)
		free(vm->vm_hashlist, M_VMEM);

	bt_freetrim(vm, 0);

	VMEM_CONDVAR_DESTROY(vm);
	VMEM_LOCK_DESTROY(vm);
	free(vm, M_VMEM);
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, int flags)
{
	vmem_addr_t addr;
	int error;

	if (vm->vm_importfn == NULL)
		return EINVAL;

	size = roundup(size, vm->vm_import_quantum);

	/*
	 * Hide MAXALLOC tags so we're guaranteed to be able to add this
	 * span and the tag we want to allocate from it.
	 */
	MPASS(vm->vm_nfreetags >= BT_MAXALLOC);
	vm->vm_nfreetags -= BT_MAXALLOC;
	VMEM_UNLOCK(vm);
	error = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	VMEM_LOCK(vm);
	vm->vm_nfreetags += BT_MAXALLOC;
	if (error)
		return ENOMEM;

	vmem_add1(vm, addr, size, BT_TYPE_SPAN);

	return 0;
}
/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * It is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */
static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross, vmem_addr_t minaddr,
    vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	MPASS(size > 0);
	MPASS(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integer of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr)
		end = maxaddr;
	if (start > end)
		return (ENOMEM);

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start)
		start += align;
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		MPASS(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		MPASS((start & (align - 1)) == phase);
		MPASS(!VMEM_CROSS_P(start, start + size - 1, nocross));
		MPASS(minaddr <= start);
		MPASS(maxaddr == 0 || start + size - 1 <= maxaddr);
		MPASS(bt->bt_start <= start);
		MPASS(BT_END(bt) - start >= size - 1);
		*addrp = start;

		return (0);
	}
	return (ENOMEM);
}
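/*
 * Worked example (hypothetical values, not part of the original file):
 * for a free tag covering [0x1000, 0x4fff] with size 0x800, align 0x1000
 * and phase 0x100, the candidate start is
 * VMEM_ALIGNUP(0x1000 - 0x100, 0x1000) + 0x100 = 0x1000 + 0x100 = 0x1100,
 * which satisfies (start & (align - 1)) == phase and leaves
 * [0x1100, 0x18ff] inside the tag, so vmem_fit() returns 0.
 */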
/*
 * vmem_clip:  Trim the boundary tag edges to the requested start and size.
 */
static void
vmem_clip(vmem_t *vm, bt_t *bt, vmem_addr_t start, vmem_size_t size)
{
	bt_t *btnew;
	bt_t *btprev;

	VMEM_ASSERT_LOCKED(vm);
	MPASS(bt->bt_type == BT_TYPE_FREE);
	MPASS(bt->bt_size >= size);
	bt_remfree(vm, bt);
	if (bt->bt_start != start) {
		btprev = bt_alloc(vm);
		btprev->bt_type = BT_TYPE_FREE;
		btprev->bt_start = bt->bt_start;
		btprev->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btprev->bt_size;
		bt_insfree(vm, btprev);
		bt_insseg(vm, btprev,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
	}
	MPASS(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew = bt_alloc(vm);
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew,
		    TAILQ_PREV(bt, vmem_seglist, bt_seglist));
		bt_insbusy(vm, btnew);
		bt = btnew;
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
	}
	MPASS(bt->bt_size >= size);
	bt->bt_type = BT_TYPE_BUSY;
}
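/*
 * Before/after sketch (hypothetical addresses, not part of the original
 * file): clipping [0x2000, 0x2fff] out of a free tag covering
 * [0x1000, 0x4fff] first carves off a leading free tag, then splits the
 * busy region from the trailing remainder:
 *
 *	before:	free [0x1000 .............................. 0x4fff]
 *	after:	free [0x1000..0x1fff] busy [0x2000..0x2fff]
 *		free [0x3000..0x4fff]
 */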
/* ---- vmem API */

void
vmem_set_import(vmem_t *vm, vmem_import_t *importfn,
     vmem_release_t *releasefn, void *arg, vmem_size_t import_quantum)
{

	VMEM_LOCK(vm);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_import_quantum = import_quantum;
	VMEM_UNLOCK(vm);
}

void
vmem_set_reclaim(vmem_t *vm, vmem_reclaim_t *reclaimfn)
{

	VMEM_LOCK(vm);
	vm->vm_reclaimfn = reclaimfn;
	VMEM_UNLOCK(vm);
}
/*
 * vmem_init: Initializes vmem arena.
 */
vmem_t *
vmem_init(vmem_t *vm, const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{
	int i;

	MPASS(quantum > 0);

	bzero(vm, sizeof(*vm));

	VMEM_CONDVAR_INIT(vm, name);
	VMEM_LOCK_INIT(vm, name);
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	MPASS(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
	qc_init(vm, qcache_max);

	TAILQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	memset(&vm->vm_hash0, 0, sizeof(vm->vm_hash0));
	vm->vm_hashsize = VMEM_HASHSIZE_MIN;
	vm->vm_hashlist = vm->vm_hash0;

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

	mtx_lock(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	return vm;
}

/*
 * vmem_create: create an arena.
 */
vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_size_t qcache_max, int flags)
{

	vmem_t *vm;

	vm = malloc(sizeof(*vm), M_VMEM, flags & (M_WAITOK|M_NOWAIT));
	if (vm == NULL)
		return (NULL);
	if (vmem_init(vm, name, base, size, quantum, qcache_max,
	    flags) == NULL) {
		free(vm, M_VMEM);
		return (NULL);
	}
	return (vm);
}
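/*
 * Usage sketch (illustrative; "example_import", "example_release" and
 * "example_create_subarena" are hypothetical names, not part of this
 * file): a sub-arena with a page-sized quantum that imports 64 KB spans
 * from kernel_arena on demand and releases them when they drain.
 */
#if 0
static int
example_import(void *arg, vmem_size_t size, int flags, vmem_addr_t *addrp)
{

	return (vmem_alloc(arg, size, M_BESTFIT | flags, addrp));
}

static void
example_release(void *arg, vmem_addr_t addr, vmem_size_t size)
{

	vmem_free(arg, addr, size);
}

static vmem_t *
example_create_subarena(void)
{
	vmem_t *sub;

	sub = vmem_create("example", 0, 0, PAGE_SIZE, 0, M_WAITOK);
	vmem_set_import(sub, example_import, example_release, kernel_arena,
	    64 * 1024);
	return (sub);
}
#endif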
void
vmem_destroy(vmem_t *vm)
{

	mtx_lock(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mtx_unlock(&vmem_list_lock);

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc: allocate resource from the arena.
 */
int
vmem_alloc(vmem_t *vm, vmem_size_t size, int flags, vmem_addr_t *addrp)
{
	const int strat __unused = flags & VMEM_FITMASK;
	qcache_t *qc;

	flags &= VMEM_FLAGS;
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_alloc");

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		*addrp = (vmem_addr_t)uma_zalloc(qc->qc_cache, flags);
		if (*addrp == 0)
			return (ENOMEM);
		return (0);
	}

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}
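/*
 * Minimal usage sketch (illustrative; "example_page_alloc" is a
 * hypothetical name, not part of this file): a page-sized allocation
 * from kernel_arena paired with its matching free.
 */
#if 0
static void
example_page_alloc(void)
{
	vmem_addr_t addr;

	if (vmem_alloc(kernel_arena, PAGE_SIZE,
	    M_BESTFIT | M_WAITOK, &addr) == 0)
		vmem_free(kernel_arena, addr, PAGE_SIZE);
}
#endif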
int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, int flags,
    vmem_addr_t *addrp)
{
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	vmem_size_t avail;
	bt_t *bt;
	int error;
	int strat;

	flags &= VMEM_FLAGS;
	strat = flags & VMEM_FITMASK;
	MPASS(size0 > 0);
	MPASS(size > 0);
	MPASS(strat == M_BESTFIT || strat == M_FIRSTFIT);
	MPASS((flags & (M_NOWAIT|M_WAITOK)) != (M_NOWAIT|M_WAITOK));
	if ((flags & M_NOWAIT) == 0)
		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "vmem_xalloc");
	MPASS((align & vm->vm_quantum_mask) == 0);
	MPASS((align & (align - 1)) == 0);
	MPASS((phase & vm->vm_quantum_mask) == 0);
	MPASS((nocross & vm->vm_quantum_mask) == 0);
	MPASS((nocross & (nocross - 1)) == 0);
	MPASS((align == 0 && phase == 0) || phase < align);
	MPASS(nocross == 0 || nocross >= size);
	MPASS(minaddr <= maxaddr);
	MPASS(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0)
		align = vm->vm_quantum_mask + 1;

	*addrp = 0;
	end = &vm->vm_freelist[VMEM_MAXORDER];
	/*
	 * choose a free block from which we allocate.
	 */
	first = bt_freehead_toalloc(vm, size, strat);
	VMEM_LOCK(vm);
	for (;;) {
		/*
		 * Make sure we have enough tags to complete the
		 * operation.
		 */
		if (vm->vm_nfreetags < BT_MAXALLOC &&
		    bt_fill(vm, flags) != 0) {
			error = ENOMEM;
			break;
		}
		/*
		 * Scan freelists looking for a tag that satisfies the
		 * allocation.  If we're doing BESTFIT we may encounter
		 * sizes below the request.  If we're doing FIRSTFIT we
		 * inspect only the first element from each list.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					error = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, addrp);
					if (error == 0) {
						vmem_clip(vm, bt, *addrp, size);
						goto out;
					}
				}
				/* FIRST skips to the next list. */
				if (strat == M_FIRSTFIT)
					break;
			}
		}
		/*
		 * Retry if the fast algorithm failed.
		 */
		if (strat == M_FIRSTFIT) {
			strat = M_BESTFIT;
			first = bt_freehead_toalloc(vm, size, strat);
			continue;
		}
		/*
		 * XXX it is possible to fail to meet restrictions with the
		 * imported region.  It is up to the user to specify the
		 * import quantum such that it can satisfy any allocation.
		 */
		if (vmem_import(vm, size, flags) == 0)
			continue;

		/*
		 * Try to free some space from the quantum cache or reclaim
		 * functions if available.
		 */
		if (vm->vm_qcache_max != 0 || vm->vm_reclaimfn != NULL) {
			avail = vm->vm_size - vm->vm_inuse;
			VMEM_UNLOCK(vm);
			if (vm->vm_qcache_max != 0)
				qc_drain(vm);
			if (vm->vm_reclaimfn != NULL)
				vm->vm_reclaimfn(vm, flags);
			VMEM_LOCK(vm);
			/* If we were successful retry even NOWAIT. */
			if (vm->vm_size - vm->vm_inuse > avail)
				continue;
		}
		if ((flags & M_NOWAIT) != 0) {
			error = ENOMEM;
			break;
		}
		VMEM_CONDVAR_WAIT(vm);
	}
out:
	VMEM_UNLOCK(vm);
	if (error != 0 && (flags & M_NOWAIT) == 0)
		panic("failed to allocate waiting allocation\n");

	return (error);
}
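/*
 * Constrained-allocation sketch (illustrative; "example_constrained_alloc"
 * is a hypothetical name, not part of this file): 8 KB, 64 KB aligned,
 * that must not cross a 1 MB boundary, anywhere in the arena's range.
 */
#if 0
static int
example_constrained_alloc(vmem_t *vm, vmem_addr_t *addrp)
{

	/* size 8192, align 64 KB, phase 0, nocross 1 MB. */
	return (vmem_xalloc(vm, 8192, 64 * 1024, 0, 1024 * 1024,
	    VMEM_ADDR_MIN, VMEM_ADDR_MAX, M_BESTFIT | M_NOWAIT, addrp));
}
#endif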
/*
 * vmem_free: free the resource to the arena.
 */
void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	qcache_t *qc;
	MPASS(size > 0);

	if (size <= vm->vm_qcache_max) {
		qc = &vm->vm_qcache[(size - 1) >> vm->vm_quantum_shift];
		uma_zfree(qc->qc_cache, (void *)addr);
	} else
		vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;

	MPASS(size > 0);

	VMEM_LOCK(vm);
	bt = bt_lookupbusy(vm, addr);
	MPASS(bt != NULL);
	MPASS(bt->bt_start == addr);
	MPASS(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	MPASS(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = TAILQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(bt) < t->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}
	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		MPASS(BT_END(t) < bt->bt_start);	/* YYY */
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		bt_remfree(vm, t);
		bt_remseg(vm, t);
	}

	t = TAILQ_PREV(bt, vmem_seglist, bt_seglist);
	MPASS(t != NULL);
	MPASS(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		MPASS(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		bt_remseg(vm, t);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		bt_freetrim(vm, BT_MAXFREE);
	}
}
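/*
 * Coalescing sketch (hypothetical addresses, not part of the original
 * file): freeing busy [0x2000, 0x2fff] between two free neighbors merges
 * all three tags, and if the merged tag covers an entire imported
 * BT_TYPE_SPAN the span is handed back through vm_releasefn:
 *
 *	before:	free [0x1000..0x1fff] busy [0x2000..0x2fff]
 *		free [0x3000..0x4fff]
 *	after:	free [0x1000 .............................. 0x4fff]
 */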
/*
 * vmem_add: add a static span to the arena.
 */
int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, int flags)
{
	int error;

	error = 0;
	flags &= VMEM_FLAGS;
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags >= BT_MAXALLOC || bt_fill(vm, flags) == 0)
		vmem_add1(vm, addr, size, BT_TYPE_SPAN_STATIC);
	else
		error = ENOMEM;
	VMEM_UNLOCK(vm);

	return (error);
}

/*
 * vmem_size: report arena size information.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- debug */

#if defined(DDB) || defined(DIAGNOSTIC)

static void bt_dump(const bt_t *, int (*)(const char *, ...)
    __printflike(1, 2));

static const char *
bt_type_string(int type)
{

	switch (type) {
	case BT_TYPE_BUSY:
		return "busy";
	case BT_TYPE_FREE:
		return "free";
	case BT_TYPE_SPAN:
		return "span";
	case BT_TYPE_SPAN_STATIC:
		return "static span";
	default:
		break;
	}
	return "BOGUS";
}

static void
bt_dump(const bt_t *bt, int (*pr)(const char *, ...))
{

	(*pr)("\t%p: %jx %jx, %d(%s)\n",
	    bt, (intmax_t)bt->bt_start, (intmax_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}
static void
vmem_dump(const vmem_t *vm, int (*pr)(const char *, ...) __printflike(1, 2))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(DIAGNOSTIC) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, vmem_addr_t addr)
{
	bt_t *bt;

	TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(vmem_addr_t addr, int (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (vmem_size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}
"allocated" : "free"); 1404252330Sjeff } 1405252330Sjeff} 1406252330Sjeff 1407252330Sjeffvoid 1408252330Sjeffvmem_printall(const char *modif, int (*pr)(const char *, ...)) 1409252330Sjeff{ 1410252330Sjeff const vmem_t *vm; 1411252330Sjeff 1412252330Sjeff LIST_FOREACH(vm, &vmem_list, vm_alllist) { 1413252330Sjeff vmem_dump(vm, pr); 1414252330Sjeff } 1415252330Sjeff} 1416252330Sjeff 1417252330Sjeffvoid 1418252330Sjeffvmem_print(vmem_addr_t addr, const char *modif, int (*pr)(const char *, ...)) 1419252330Sjeff{ 1420252330Sjeff const vmem_t *vm = (const void *)addr; 1421252330Sjeff 1422252330Sjeff vmem_dump(vm, pr); 1423252330Sjeff} 1424252330Sjeff#endif /* defined(DDB) */ 1425252330Sjeff 1426252330Sjeff#define vmem_printf printf 1427252330Sjeff 1428252330Sjeff#if defined(DIAGNOSTIC) 1429252330Sjeff 1430252330Sjeffstatic bool 1431252330Sjeffvmem_check_sanity(vmem_t *vm) 1432252330Sjeff{ 1433252330Sjeff const bt_t *bt, *bt2; 1434252330Sjeff 1435252330Sjeff MPASS(vm != NULL); 1436252330Sjeff 1437252330Sjeff TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1438252330Sjeff if (bt->bt_start > BT_END(bt)) { 1439252330Sjeff printf("corrupted tag\n"); 1440252330Sjeff bt_dump(bt, vmem_printf); 1441252330Sjeff return false; 1442252330Sjeff } 1443252330Sjeff } 1444252330Sjeff TAILQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) { 1445252330Sjeff TAILQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) { 1446252330Sjeff if (bt == bt2) { 1447252330Sjeff continue; 1448252330Sjeff } 1449252330Sjeff if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) { 1450252330Sjeff continue; 1451252330Sjeff } 1452252330Sjeff if (bt->bt_start <= BT_END(bt2) && 1453252330Sjeff bt2->bt_start <= BT_END(bt)) { 1454252330Sjeff printf("overwrapped tags\n"); 1455252330Sjeff bt_dump(bt, vmem_printf); 1456252330Sjeff bt_dump(bt2, vmem_printf); 1457252330Sjeff return false; 1458252330Sjeff } 1459252330Sjeff } 1460252330Sjeff } 1461252330Sjeff 1462252330Sjeff return true; 1463252330Sjeff} 1464252330Sjeff 1465252330Sjeffstatic void 1466252330Sjeffvmem_check(vmem_t *vm) 1467252330Sjeff{ 1468252330Sjeff 1469252330Sjeff if (!vmem_check_sanity(vm)) { 1470252330Sjeff panic("insanity vmem %p", vm); 1471252330Sjeff } 1472252330Sjeff} 1473252330Sjeff 1474252330Sjeff#endif /* defined(DIAGNOSTIC) */ 1475