/*	$NetBSD$	*/

/*-
 * Copyright (c)2006,2007,2008,2009 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * reference:
 * -	Magazines and Vmem: Extending the Slab Allocator
 *	to Many CPUs and Arbitrary Resources
 *	http://www.usenix.org/event/usenix01/bonwick.html
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD$");

#if defined(_KERNEL)
#include "opt_ddb.h"
#define	QCACHE
#endif /* defined(_KERNEL) */

#include <sys/param.h>
#include <sys/hash.h>
#include <sys/queue.h>
#include <sys/bitops.h>

#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/kernel.h>	/* hz */
#include <sys/callout.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/vmem.h>
#include <sys/workqueue.h>
#include <sys/atomic.h>
#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>
#include <uvm/uvm_km.h>
#include <uvm/uvm_page.h>
#include <uvm/uvm_pdaemon.h>
#else /* defined(_KERNEL) */
#include "../sys/vmem.h"
#endif /* defined(_KERNEL) */


#if defined(_KERNEL)
#include <sys/evcnt.h>
#define	VMEM_EVCNT_DEFINE(name) \
struct evcnt vmem_evcnt_##name = EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, \
    "vmemev", #name); \
EVCNT_ATTACH_STATIC(vmem_evcnt_##name);
#define	VMEM_EVCNT_INCR(ev)	vmem_evcnt_##ev.ev_count++
#define	VMEM_EVCNT_DECR(ev)	vmem_evcnt_##ev.ev_count--

VMEM_EVCNT_DEFINE(bt_pages)
VMEM_EVCNT_DEFINE(bt_count)
VMEM_EVCNT_DEFINE(bt_inuse)

#define	LOCK_DECL(name)		\
    kmutex_t name; char lockpad[COHERENCY_UNIT - sizeof(kmutex_t)]

#define	CONDVAR_DECL(name)	\
    kcondvar_t name

#else /* defined(_KERNEL) */
#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define	VMEM_EVCNT_INCR(ev)	/* nothing */
#define	VMEM_EVCNT_DECR(ev)	/* nothing */

#define	UNITTEST
#define	KASSERT(a)		assert(a)
#define	LOCK_DECL(name)		/* nothing */
#define	CONDVAR_DECL(name)	/* nothing */
#define	VMEM_CONDVAR_INIT(vm, wchan)	/* nothing */
#define	VMEM_CONDVAR_BROADCAST(vm)	/* nothing */
#define	mutex_init(a, b, c)	/* nothing */
#define	mutex_destroy(a)	/* nothing */
#define	mutex_enter(a)		/* nothing */
#define	mutex_tryenter(a)	true
#define	mutex_exit(a)		/* nothing */
#define	mutex_owned(a)		true
#define	ASSERT_SLEEPABLE()	/* nothing */
#define	panic(...)		do { printf(__VA_ARGS__); abort(); } while (0)
#endif /* defined(_KERNEL) */
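/*
 * Note: in the userland (!_KERNEL) build the locking and condvar
 * primitives above are compiled out; the unit test at the bottom of this
 * file runs single-threaded, so the stubs only need to keep the
 * compiler happy.
 */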
struct vmem;
struct vmem_btag;

#if defined(VMEM_SANITY)
static void vmem_check(vmem_t *);
#else /* defined(VMEM_SANITY) */
#define	vmem_check(vm)	/* nothing */
#endif /* defined(VMEM_SANITY) */

#define	VMEM_MAXORDER		(sizeof(vmem_size_t) * CHAR_BIT)

#define	VMEM_HASHSIZE_MIN	1	/* XXX */
#define	VMEM_HASHSIZE_MAX	65536	/* XXX */
#define	VMEM_HASHSIZE_INIT	1

#define	VM_FITMASK	(VM_BESTFIT | VM_INSTANTFIT)

CIRCLEQ_HEAD(vmem_seglist, vmem_btag);
LIST_HEAD(vmem_freelist, vmem_btag);
LIST_HEAD(vmem_hashlist, vmem_btag);

#if defined(QCACHE)
#define	VMEM_QCACHE_IDX_MAX	32

#define	QC_NAME_MAX	16

struct qcache {
	pool_cache_t qc_cache;
	vmem_t *qc_vmem;
	char qc_name[QC_NAME_MAX];
};
typedef struct qcache qcache_t;
#define	QC_POOL_TO_QCACHE(pool)	((qcache_t *)(pool->pr_qcache))
#endif /* defined(QCACHE) */

#define	VMEM_NAME_MAX	16

/* vmem arena */
struct vmem {
	CONDVAR_DECL(vm_cv);
	LOCK_DECL(vm_lock);
	vm_flag_t vm_flags;
	vmem_import_t *vm_importfn;
	vmem_release_t *vm_releasefn;
	size_t vm_nfreetags;
	LIST_HEAD(, vmem_btag) vm_freetags;
	void *vm_arg;
	struct vmem_seglist vm_seglist;
	struct vmem_freelist vm_freelist[VMEM_MAXORDER];
	size_t vm_hashsize;
	size_t vm_nbusytag;
	struct vmem_hashlist *vm_hashlist;
	struct vmem_hashlist vm_hash0;
	size_t vm_quantum_mask;
	int vm_quantum_shift;
	size_t vm_size;
	size_t vm_inuse;
	char vm_name[VMEM_NAME_MAX+1];
	LIST_ENTRY(vmem) vm_alllist;

#if defined(QCACHE)
	/* quantum cache */
	size_t vm_qcache_max;
	struct pool_allocator vm_qcache_allocator;
	qcache_t vm_qcache_store[VMEM_QCACHE_IDX_MAX];
	qcache_t *vm_qcache[VMEM_QCACHE_IDX_MAX];
#endif /* defined(QCACHE) */
};

#define	VMEM_LOCK(vm)		mutex_enter(&vm->vm_lock)
#define	VMEM_TRYLOCK(vm)	mutex_tryenter(&vm->vm_lock)
#define	VMEM_UNLOCK(vm)		mutex_exit(&vm->vm_lock)
#define	VMEM_LOCK_INIT(vm, ipl)	mutex_init(&vm->vm_lock, MUTEX_DEFAULT, ipl)
#define	VMEM_LOCK_DESTROY(vm)	mutex_destroy(&vm->vm_lock)
#define	VMEM_ASSERT_LOCKED(vm)	KASSERT(mutex_owned(&vm->vm_lock))

#if defined(_KERNEL)
#define	VMEM_CONDVAR_INIT(vm, wchan)	cv_init(&vm->vm_cv, wchan)
#define	VMEM_CONDVAR_DESTROY(vm)	cv_destroy(&vm->vm_cv)
#define	VMEM_CONDVAR_WAIT(vm)		cv_wait(&vm->vm_cv, &vm->vm_lock)
#define	VMEM_CONDVAR_BROADCAST(vm)	cv_broadcast(&vm->vm_cv)
#endif /* defined(_KERNEL) */

/* boundary tag */
struct vmem_btag {
	CIRCLEQ_ENTRY(vmem_btag) bt_seglist;
	union {
		LIST_ENTRY(vmem_btag) u_freelist; /* BT_TYPE_FREE */
		LIST_ENTRY(vmem_btag) u_hashlist; /* BT_TYPE_BUSY */
	} bt_u;
#define	bt_hashlist	bt_u.u_hashlist
#define	bt_freelist	bt_u.u_freelist
	vmem_addr_t bt_start;
	vmem_size_t bt_size;
	int bt_type;
};

#define	BT_TYPE_SPAN		1
#define	BT_TYPE_SPAN_STATIC	2
#define	BT_TYPE_FREE		3
#define	BT_TYPE_BUSY		4
#define	BT_ISSPAN_P(bt)	((bt)->bt_type <= BT_TYPE_SPAN_STATIC)

#define	BT_END(bt)	((bt)->bt_start + (bt)->bt_size - 1)

typedef struct vmem_btag bt_t;

#if defined(_KERNEL)
static kmutex_t vmem_list_lock;
static LIST_HEAD(, vmem) vmem_list = LIST_HEAD_INITIALIZER(vmem_list);
#endif /* defined(_KERNEL) */

/* ---- misc */

#define	VMEM_ALIGNUP(addr, align) \
	(-(-(addr) & -(align)))

#define	VMEM_CROSS_P(addr1, addr2, boundary) \
	((((addr1) ^ (addr2)) & -(boundary)) != 0)

#define	ORDER2SIZE(order)	((vmem_size_t)1 << (order))
#define	SIZE2ORDER(size)	((int)ilog2(size))
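/*
 * Illustrative examples of the macros above (assuming the usual
 * two's-complement representation and power-of-2 "align"/"boundary"):
 *
 *	VMEM_ALIGNUP(5, 4) == 8		(-(-5 & -4) == -(-8) == 8)
 *	VMEM_ALIGNUP(8, 4) == 8		(aligned addresses are unchanged)
 *	VMEM_CROSS_P(0xfff, 0x1000, 0x1000) is true, because the two
 *	addresses lie on opposite sides of a 0x1000 boundary.
 *	SIZE2ORDER(10) == 3, and ORDER2SIZE(3) == 8.
 */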
#if !defined(_KERNEL)
#define	xmalloc(sz, flags)	malloc(sz)
#define	xfree(p, sz)		free(p)
#define	bt_alloc(vm, flags)	malloc(sizeof(bt_t))
#define	bt_free(vm, bt)		free(bt)
#else /* defined(_KERNEL) */

#define	xmalloc(sz, flags) \
    kmem_alloc(sz, ((flags) & VM_SLEEP) ? KM_SLEEP : KM_NOSLEEP)
#define	xfree(p, sz)		kmem_free(p, sz)

#define	BT_MINRESERVE 6
#define	BT_MAXFREE 64
#define	STATIC_VMEM_COUNT 5
#define	STATIC_BT_COUNT 200
/* must be equal to or greater than the qcache multiplier for kmem_va_arena */
#define	STATIC_QC_POOL_COUNT 8

static struct vmem static_vmems[STATIC_VMEM_COUNT];
static int static_vmem_count = STATIC_VMEM_COUNT;

static struct vmem_btag static_bts[STATIC_BT_COUNT];
static int static_bt_count = STATIC_BT_COUNT;

static struct pool_cache static_qc_pools[STATIC_QC_POOL_COUNT];
static int static_qc_pool_count = STATIC_QC_POOL_COUNT;

vmem_t *kmem_va_meta_arena;
vmem_t *kmem_meta_arena;

static kmutex_t vmem_refill_lock;
static kmutex_t vmem_btag_lock;
static LIST_HEAD(, vmem_btag) vmem_btag_freelist;
static size_t vmem_btag_freelist_count = 0;
static size_t vmem_btag_count = STATIC_BT_COUNT;

/* ---- boundary tag */

#define	BT_PER_PAGE	(PAGE_SIZE / sizeof(bt_t))

static int bt_refill(vmem_t *vm, vm_flag_t flags);

static int
bt_refillglobal(vm_flag_t flags)
{
	vmem_addr_t va;
	bt_t *btp;
	bt_t *bt;
	int i;

	mutex_enter(&vmem_refill_lock);

	mutex_enter(&vmem_btag_lock);
	if (vmem_btag_freelist_count > (BT_MINRESERVE * 16)) {
		mutex_exit(&vmem_btag_lock);
		mutex_exit(&vmem_refill_lock);
		return 0;
	}
	mutex_exit(&vmem_btag_lock);

	if (vmem_alloc(kmem_meta_arena, PAGE_SIZE,
	    (flags & ~VM_FITMASK) | VM_INSTANTFIT | VM_POPULATING, &va) != 0) {
		mutex_exit(&vmem_refill_lock);
		return ENOMEM;
	}
	VMEM_EVCNT_INCR(bt_pages);

	mutex_enter(&vmem_btag_lock);
	btp = (void *)va;
	for (i = 0; i < (BT_PER_PAGE); i++) {
		bt = btp;
		memset(bt, 0, sizeof(*bt));
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt,
		    bt_freelist);
		vmem_btag_freelist_count++;
		vmem_btag_count++;
		VMEM_EVCNT_INCR(bt_count);
		btp++;
	}
	mutex_exit(&vmem_btag_lock);

	bt_refill(kmem_arena, (flags & ~VM_FITMASK)
	    | VM_INSTANTFIT | VM_POPULATING);
	bt_refill(kmem_va_meta_arena, (flags & ~VM_FITMASK)
	    | VM_INSTANTFIT | VM_POPULATING);
	bt_refill(kmem_meta_arena, (flags & ~VM_FITMASK)
	    | VM_INSTANTFIT | VM_POPULATING);

	mutex_exit(&vmem_refill_lock);

	return 0;
}

static int
bt_refill(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;

	if (!(flags & VM_POPULATING)) {
		bt_refillglobal(flags);
	}

	VMEM_LOCK(vm);
	mutex_enter(&vmem_btag_lock);
	while (!LIST_EMPTY(&vmem_btag_freelist) &&
	    vm->vm_nfreetags < (BT_MINRESERVE * 2)) {
		bt = LIST_FIRST(&vmem_btag_freelist);
		LIST_REMOVE(bt, bt_freelist);
		LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
		vm->vm_nfreetags++;
		vmem_btag_freelist_count--;
	}
	mutex_exit(&vmem_btag_lock);

	if (vm->vm_nfreetags == 0) {
		VMEM_UNLOCK(vm);
		return ENOMEM;
	}
	VMEM_UNLOCK(vm);

	return 0;
}
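/*
 * The global tag pool above solves a bootstrap problem: boundary tags are
 * themselves allocated from an arena (kmem_meta_arena), and that
 * allocation may in turn consume tags.  VM_POPULATING marks the recursive
 * refills so that they draw from the per-arena reserve instead of
 * re-entering bt_refillglobal(), and each arena keeps at least
 * BT_MINRESERVE tags on hand for exactly this case.
 */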
static inline bt_t *
bt_alloc(vmem_t *vm, vm_flag_t flags)
{
	bt_t *bt;
again:
	VMEM_LOCK(vm);
	if (vm->vm_nfreetags < BT_MINRESERVE &&
	    (flags & VM_POPULATING) == 0) {
		VMEM_UNLOCK(vm);
		if (bt_refill(vm, VM_NOSLEEP | VM_INSTANTFIT)) {
			return NULL;
		}
		goto again;
	}
	bt = LIST_FIRST(&vm->vm_freetags);
	LIST_REMOVE(bt, bt_freelist);
	vm->vm_nfreetags--;
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_INCR(bt_inuse);

	return bt;
}

static inline void
bt_free(vmem_t *vm, bt_t *bt)
{

	VMEM_LOCK(vm);
	LIST_INSERT_HEAD(&vm->vm_freetags, bt, bt_freelist);
	vm->vm_nfreetags++;
	while (vm->vm_nfreetags > BT_MAXFREE) {
		bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
		mutex_exit(&vmem_btag_lock);
	}
	VMEM_UNLOCK(vm);
	VMEM_EVCNT_DECR(bt_inuse);
}

#endif /* defined(_KERNEL) */

/*
 * freelist[0] ... [1, 1]
 * freelist[1] ... [2, 3]
 * freelist[2] ... [4, 7]
 * freelist[3] ... [8, 15]
 *  :
 * freelist[n] ... [(1 << n), (1 << (n + 1)) - 1]
 *  :
 */
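/*
 * For example, a free segment of 10 quanta has SIZE2ORDER(10) == 3 and
 * therefore lives on freelist[3], which holds segments of 8 to 15 quanta.
 */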
static struct vmem_freelist *
bt_freehead_tofree(vmem_t *vm, vmem_size_t size)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	const int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}

/*
 * bt_freehead_toalloc: return the freelist for the given size and allocation
 * strategy.
 *
 * for VM_INSTANTFIT, return the list on which any block is large enough
 * for the requested size.  otherwise, return the list which can contain
 * blocks large enough for the requested size.
 */

static struct vmem_freelist *
bt_freehead_toalloc(vmem_t *vm, vmem_size_t size, vm_flag_t strat)
{
	const vmem_size_t qsize = size >> vm->vm_quantum_shift;
	int idx = SIZE2ORDER(qsize);

	KASSERT(size != 0 && qsize != 0);
	KASSERT((size & vm->vm_quantum_mask) == 0);

	if (strat == VM_INSTANTFIT && ORDER2SIZE(idx) != qsize) {
		idx++;
		/* XXX should we check for a too-large request here? */
	}
	KASSERT(idx >= 0);
	KASSERT(idx < VMEM_MAXORDER);

	return &vm->vm_freelist[idx];
}
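/*
 * Continuing the example above: for a request of 10 quanta, VM_INSTANTFIT
 * bumps the index from 3 to 4, because freelist[3] may hold segments as
 * small as 8 quanta, while every segment on freelist[4] is at least 16
 * quanta and thus guaranteed to fit without searching the list.
 */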
/* ---- boundary tag hash */

static struct vmem_hashlist *
bt_hashhead(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	unsigned int hash;

	hash = hash32_buf(&addr, sizeof(addr), HASH32_BUF_INIT);
	list = &vm->vm_hashlist[hash % vm->vm_hashsize];

	return list;
}

static bt_t *
bt_lookupbusy(vmem_t *vm, vmem_addr_t addr)
{
	struct vmem_hashlist *list;
	bt_t *bt;

	list = bt_hashhead(vm, addr);
	LIST_FOREACH(bt, list, bt_hashlist) {
		if (bt->bt_start == addr) {
			break;
		}
	}

	return bt;
}

static void
bt_rembusy(vmem_t *vm, bt_t *bt)
{

	KASSERT(vm->vm_nbusytag > 0);
	vm->vm_inuse -= bt->bt_size;
	vm->vm_nbusytag--;
	LIST_REMOVE(bt, bt_hashlist);
}

static void
bt_insbusy(vmem_t *vm, bt_t *bt)
{
	struct vmem_hashlist *list;

	KASSERT(bt->bt_type == BT_TYPE_BUSY);

	list = bt_hashhead(vm, bt->bt_start);
	LIST_INSERT_HEAD(list, bt, bt_hashlist);
	vm->vm_nbusytag++;
	vm->vm_inuse += bt->bt_size;
}

/* ---- boundary tag list */

static void
bt_remseg(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_REMOVE(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_insseg(vmem_t *vm, bt_t *bt, bt_t *prev)
{

	CIRCLEQ_INSERT_AFTER(&vm->vm_seglist, prev, bt, bt_seglist);
}

static void
bt_insseg_tail(vmem_t *vm, bt_t *bt)
{

	CIRCLEQ_INSERT_TAIL(&vm->vm_seglist, bt, bt_seglist);
}

static void
bt_remfree(vmem_t *vm, bt_t *bt)
{

	KASSERT(bt->bt_type == BT_TYPE_FREE);

	LIST_REMOVE(bt, bt_freelist);
}

static void
bt_insfree(vmem_t *vm, bt_t *bt)
{
	struct vmem_freelist *list;

	list = bt_freehead_tofree(vm, bt->bt_size);
	LIST_INSERT_HEAD(list, bt, bt_freelist);
}
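/*
 * Busy tags are hashed on their start address so that vmem_free(), which
 * is given only (addr, size), can find the matching boundary tag with an
 * expected O(1) lookup instead of walking the whole segment list.
 */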
/* ---- vmem internal functions */

#if defined(QCACHE)
static inline vm_flag_t
prf_to_vmf(int prflags)
{
	vm_flag_t vmflags;

	KASSERT((prflags & ~(PR_LIMITFAIL | PR_WAITOK | PR_NOWAIT)) == 0);
	if ((prflags & PR_WAITOK) != 0) {
		vmflags = VM_SLEEP;
	} else {
		vmflags = VM_NOSLEEP;
	}
	return vmflags;
}

static inline int
vmf_to_prf(vm_flag_t vmflags)
{
	int prflags;

	if ((vmflags & VM_SLEEP) != 0) {
		prflags = PR_WAITOK;
	} else {
		prflags = PR_NOWAIT;
	}
	return prflags;
}

static size_t
qc_poolpage_size(size_t qcache_max)
{
	int i;

	for (i = 0; ORDER2SIZE(i) <= qcache_max * 3; i++) {
		/* nothing */
	}
	return ORDER2SIZE(i);
}

static void *
qc_poolpage_alloc(struct pool *pool, int prflags)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;
	vmem_addr_t addr;

	if (vmem_alloc(vm, pool->pr_alloc->pa_pagesz,
	    prf_to_vmf(prflags) | VM_INSTANTFIT, &addr) != 0)
		return NULL;
	return (void *)addr;
}

static void
qc_poolpage_free(struct pool *pool, void *addr)
{
	qcache_t *qc = QC_POOL_TO_QCACHE(pool);
	vmem_t *vm = qc->qc_vmem;

	vmem_free(vm, (vmem_addr_t)addr, pool->pr_alloc->pa_pagesz);
}

static void
qc_init(vmem_t *vm, size_t qcache_max, int ipl)
{
	qcache_t *prevqc;
	struct pool_allocator *pa;
	int qcache_idx_max;
	int i;

	KASSERT((qcache_max & vm->vm_quantum_mask) == 0);
	if (qcache_max > (VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift)) {
		qcache_max = VMEM_QCACHE_IDX_MAX << vm->vm_quantum_shift;
	}
	vm->vm_qcache_max = qcache_max;
	pa = &vm->vm_qcache_allocator;
	memset(pa, 0, sizeof(*pa));
	pa->pa_alloc = qc_poolpage_alloc;
	pa->pa_free = qc_poolpage_free;
	pa->pa_pagesz = qc_poolpage_size(qcache_max);

	qcache_idx_max = qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = qcache_idx_max; i > 0; i--) {
		qcache_t *qc = &vm->vm_qcache_store[i - 1];
		size_t size = i << vm->vm_quantum_shift;
		pool_cache_t pc;

		qc->qc_vmem = vm;
		snprintf(qc->qc_name, sizeof(qc->qc_name), "%s-%zu",
		    vm->vm_name, size);

		if (vm->vm_flags & VM_BOOTSTRAP) {
			KASSERT(static_qc_pool_count > 0);
			pc = &static_qc_pools[--static_qc_pool_count];
			pool_cache_bootstrap(pc, size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH | PR_RECURSIVE /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		} else {
			pc = pool_cache_init(size,
			    ORDER2SIZE(vm->vm_quantum_shift), 0,
			    PR_NOALIGN | PR_NOTOUCH /* XXX */,
			    qc->qc_name, pa, ipl, NULL, NULL, NULL);
		}
		qc->qc_cache = pc;
		KASSERT(qc->qc_cache != NULL);	/* XXX */
		if (prevqc != NULL &&
		    qc->qc_cache->pc_pool.pr_itemsperpage ==
		    prevqc->qc_cache->pc_pool.pr_itemsperpage) {
			if (vm->vm_flags & VM_BOOTSTRAP) {
				pool_cache_bootstrap_destroy(pc);
				//static_qc_pool_count++;
			} else {
				pool_cache_destroy(qc->qc_cache);
			}
			vm->vm_qcache[i - 1] = prevqc;
			continue;
		}
		qc->qc_cache->pc_pool.pr_qcache = qc;
		vm->vm_qcache[i - 1] = qc;
		prevqc = qc;
	}
}

static void
qc_destroy(vmem_t *vm)
{
	const qcache_t *prevqc;
	int i;
	int qcache_idx_max;

	qcache_idx_max = vm->vm_qcache_max >> vm->vm_quantum_shift;
	prevqc = NULL;
	for (i = 0; i < qcache_idx_max; i++) {
		qcache_t *qc = vm->vm_qcache[i];

		if (prevqc == qc) {
			continue;
		}
		if (vm->vm_flags & VM_BOOTSTRAP) {
			pool_cache_bootstrap_destroy(qc->qc_cache);
		} else {
			pool_cache_destroy(qc->qc_cache);
		}
		prevqc = qc;
	}
}
#endif /* defined(QCACHE) */
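/*
 * The quantum cache gives small allocations (up to qcache_max bytes)
 * constant-time, CPU-cached service via pool_cache, with one cache per
 * multiple of the quantum.  For instance, with a 4 KB quantum and a
 * qcache_max of 32 KB, requests are rounded up to 4 KB, 8 KB, ... 32 KB
 * and served from eight caches; neighboring sizes whose pools would pack
 * the same number of items per page share a single cache.
 */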
#if defined(_KERNEL)
void
vmem_bootstrap(void)
{

	mutex_init(&vmem_list_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_refill_lock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&vmem_btag_lock, MUTEX_DEFAULT, IPL_VM);

	while (static_bt_count-- > 0) {
		bt_t *bt = &static_bts[static_bt_count];
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		VMEM_EVCNT_INCR(bt_count);
		vmem_btag_freelist_count++;
	}
}

void
vmem_init(vmem_t *vm)
{

	kmem_va_meta_arena = vmem_create("vmem-va", 0, 0, PAGE_SIZE,
	    vmem_alloc, vmem_free, vm,
	    0, VM_NOSLEEP | VM_BOOTSTRAP | VM_LARGEIMPORT,
	    IPL_VM);

	kmem_meta_arena = vmem_create("vmem-meta", 0, 0, PAGE_SIZE,
	    uvm_km_kmem_alloc, uvm_km_kmem_free, kmem_va_meta_arena,
	    0, VM_NOSLEEP | VM_BOOTSTRAP, IPL_VM);
}
#endif /* defined(_KERNEL) */

static int
vmem_add1(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags,
    int spanbttype)
{
	bt_t *btspan;
	bt_t *btfree;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(spanbttype == BT_TYPE_SPAN ||
	    spanbttype == BT_TYPE_SPAN_STATIC);

	btspan = bt_alloc(vm, flags);
	if (btspan == NULL) {
		return ENOMEM;
	}
	btfree = bt_alloc(vm, flags);
	if (btfree == NULL) {
		bt_free(vm, btspan);
		return ENOMEM;
	}

	btspan->bt_type = spanbttype;
	btspan->bt_start = addr;
	btspan->bt_size = size;

	btfree->bt_type = BT_TYPE_FREE;
	btfree->bt_start = addr;
	btfree->bt_size = size;

	VMEM_LOCK(vm);
	bt_insseg_tail(vm, btspan);
	bt_insseg(vm, btfree, btspan);
	bt_insfree(vm, btfree);
	vm->vm_size += size;
	VMEM_UNLOCK(vm);

	return 0;
}

static void
vmem_destroy1(vmem_t *vm)
{

#if defined(QCACHE)
	qc_destroy(vm);
#endif /* defined(QCACHE) */
	if (vm->vm_hashlist != NULL) {
		int i;

		for (i = 0; i < vm->vm_hashsize; i++) {
			bt_t *bt;

			while ((bt = LIST_FIRST(&vm->vm_hashlist[i])) != NULL) {
				KASSERT(bt->bt_type == BT_TYPE_SPAN_STATIC);
				bt_free(vm, bt);
			}
		}
		if (vm->vm_hashlist != &vm->vm_hash0) {
			xfree(vm->vm_hashlist,
			    sizeof(struct vmem_hashlist) * vm->vm_hashsize);
		}
	}

	while (vm->vm_nfreetags > 0) {
		bt_t *bt = LIST_FIRST(&vm->vm_freetags);
		LIST_REMOVE(bt, bt_freelist);
		vm->vm_nfreetags--;
		mutex_enter(&vmem_btag_lock);
#if defined(_KERNEL)
		LIST_INSERT_HEAD(&vmem_btag_freelist, bt, bt_freelist);
		vmem_btag_freelist_count++;
#endif /* defined(_KERNEL) */
		mutex_exit(&vmem_btag_lock);
	}

	VMEM_LOCK_DESTROY(vm);
	xfree(vm, sizeof(*vm));
}

static int
vmem_import(vmem_t *vm, vmem_size_t size, vm_flag_t flags)
{
	vmem_addr_t addr;
	int rc;

	if (vm->vm_importfn == NULL) {
		return EINVAL;
	}

	if (vm->vm_flags & VM_LARGEIMPORT) {
		size *= 8;
	}

	if (vm->vm_flags & VM_XIMPORT) {
		rc = ((vmem_ximport_t *)vm->vm_importfn)(vm->vm_arg, size,
		    &size, flags, &addr);
	} else {
		rc = (vm->vm_importfn)(vm->vm_arg, size, flags, &addr);
	}
	if (rc) {
		return ENOMEM;
	}

	if (vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN) != 0) {
		(*vm->vm_releasefn)(vm->vm_arg, addr, size);
		return ENOMEM;
	}

	return 0;
}
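/*
 * vmem_import() is how a nested arena grows: when a request cannot be
 * satisfied from the existing spans, a new span of at least the requested
 * size is pulled from the parent via vm_importfn (for example, the
 * "vmem-meta" arena above imports pages from "vmem-va") and added as a
 * BT_TYPE_SPAN, which distinguishes it from spans added statically with
 * vmem_add() so that it can later be handed back through vm_releasefn.
 */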
static int
vmem_rehash(vmem_t *vm, size_t newhashsize, vm_flag_t flags)
{
	bt_t *bt;
	int i;
	struct vmem_hashlist *newhashlist;
	struct vmem_hashlist *oldhashlist;
	size_t oldhashsize;

	KASSERT(newhashsize > 0);

	newhashlist =
	    xmalloc(sizeof(struct vmem_hashlist) * newhashsize, flags);
	if (newhashlist == NULL) {
		return ENOMEM;
	}
	for (i = 0; i < newhashsize; i++) {
		LIST_INIT(&newhashlist[i]);
	}

	if (!VMEM_TRYLOCK(vm)) {
		xfree(newhashlist,
		    sizeof(struct vmem_hashlist) * newhashsize);
		return EBUSY;
	}
	oldhashlist = vm->vm_hashlist;
	oldhashsize = vm->vm_hashsize;
	vm->vm_hashlist = newhashlist;
	vm->vm_hashsize = newhashsize;
	if (oldhashlist == NULL) {
		VMEM_UNLOCK(vm);
		return 0;
	}
	for (i = 0; i < oldhashsize; i++) {
		while ((bt = LIST_FIRST(&oldhashlist[i])) != NULL) {
			bt_rembusy(vm, bt); /* XXX */
			bt_insbusy(vm, bt);
		}
	}
	VMEM_UNLOCK(vm);

	if (oldhashlist != &vm->vm_hash0) {
		xfree(oldhashlist,
		    sizeof(struct vmem_hashlist) * oldhashsize);
	}

	return 0;
}

/*
 * vmem_fit: check if a bt can satisfy the given restrictions.
 *
 * it is the caller's responsibility to ensure the region is big enough
 * before calling us.
 */

static int
vmem_fit(const bt_t *bt, vmem_size_t size, vmem_size_t align,
    vmem_size_t phase, vmem_size_t nocross,
    vmem_addr_t minaddr, vmem_addr_t maxaddr, vmem_addr_t *addrp)
{
	vmem_addr_t start;
	vmem_addr_t end;

	KASSERT(size > 0);
	KASSERT(bt->bt_size >= size); /* caller's responsibility */

	/*
	 * XXX assumption: vmem_addr_t and vmem_size_t are
	 * unsigned integers of the same size.
	 */

	start = bt->bt_start;
	if (start < minaddr) {
		start = minaddr;
	}
	end = BT_END(bt);
	if (end > maxaddr) {
		end = maxaddr;
	}
	if (start > end) {
		return ENOMEM;
	}

	start = VMEM_ALIGNUP(start - phase, align) + phase;
	if (start < bt->bt_start) {
		start += align;
	}
	if (VMEM_CROSS_P(start, start + size - 1, nocross)) {
		KASSERT(align < nocross);
		start = VMEM_ALIGNUP(start - phase, nocross) + phase;
	}
	if (start <= end && end - start >= size - 1) {
		KASSERT((start & (align - 1)) == phase);
		KASSERT(!VMEM_CROSS_P(start, start + size - 1, nocross));
		KASSERT(minaddr <= start);
		KASSERT(maxaddr == 0 || start + size - 1 <= maxaddr);
		KASSERT(bt->bt_start <= start);
		KASSERT(BT_END(bt) - start >= size - 1);
		*addrp = start;
		return 0;
	}
	return ENOMEM;
}
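/*
 * For example, a caller asking for size 0x100 with align 0x1000,
 * phase 0x10 and nocross 0 is requesting a 0x100-byte region whose start
 * is 0x10 past a 0x1000 boundary (start % 0x1000 == 0x10); vmem_fit()
 * rounds the candidate start up to the next such address inside the free
 * segment, then shifts it past the next "nocross" boundary if the region
 * would otherwise straddle one.
 */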
/*
 * vmem_create_internal: creates a vmem arena.
 */

static vmem_t *
vmem_create_internal(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    void *arg, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{
	vmem_t *vm = NULL;
	int i;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT(quantum > 0);

	if (flags & VM_BOOTSTRAP) {
#if defined(_KERNEL)
		KASSERT(static_vmem_count > 0);
		vm = &static_vmems[--static_vmem_count];
#endif /* defined(_KERNEL) */
	} else {
		vm = xmalloc(sizeof(*vm), flags);
	}
	if (vm == NULL) {
		return NULL;
	}

	VMEM_CONDVAR_INIT(vm, "vmem");
	VMEM_LOCK_INIT(vm, ipl);
	vm->vm_flags = flags;
	vm->vm_nfreetags = 0;
	LIST_INIT(&vm->vm_freetags);
	strlcpy(vm->vm_name, name, sizeof(vm->vm_name));
	vm->vm_quantum_mask = quantum - 1;
	vm->vm_quantum_shift = SIZE2ORDER(quantum);
	KASSERT(ORDER2SIZE(vm->vm_quantum_shift) == quantum);
	vm->vm_importfn = importfn;
	vm->vm_releasefn = releasefn;
	vm->vm_arg = arg;
	vm->vm_nbusytag = 0;
	vm->vm_size = 0;
	vm->vm_inuse = 0;
#if defined(QCACHE)
	qc_init(vm, qcache_max, ipl);
#endif /* defined(QCACHE) */

	CIRCLEQ_INIT(&vm->vm_seglist);
	for (i = 0; i < VMEM_MAXORDER; i++) {
		LIST_INIT(&vm->vm_freelist[i]);
	}
	vm->vm_hashlist = NULL;
	if (flags & VM_BOOTSTRAP) {
		vm->vm_hashsize = 1;
		vm->vm_hashlist = &vm->vm_hash0;
	} else if (vmem_rehash(vm, VMEM_HASHSIZE_INIT, flags)) {
		vmem_destroy1(vm);
		return NULL;
	}

	if (size != 0) {
		if (vmem_add(vm, base, size, flags) != 0) {
			vmem_destroy1(vm);
			return NULL;
		}
	}

#if defined(_KERNEL)
	if (flags & VM_BOOTSTRAP) {
		bt_refill(vm, VM_NOSLEEP);
	}

	mutex_enter(&vmem_list_lock);
	LIST_INSERT_HEAD(&vmem_list, vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	return vm;
}


/* ---- vmem API */

/*
 * vmem_create: create an arena.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_create(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_import_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_create_internal(name, base, size, quantum,
	    importfn, releasefn, source, qcache_max, flags, ipl);
}

/*
 * vmem_xcreate: create an arena with an alternative import function.
 *
 * => must not be called from interrupt context.
 */

vmem_t *
vmem_xcreate(const char *name, vmem_addr_t base, vmem_size_t size,
    vmem_size_t quantum, vmem_ximport_t *importfn, vmem_release_t *releasefn,
    vmem_t *source, vmem_size_t qcache_max, vm_flag_t flags, int ipl)
{

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((flags & (VM_XIMPORT)) == 0);

	return vmem_create_internal(name, base, size, quantum,
	    (vmem_import_t *)importfn, releasefn, source,
	    qcache_max, flags | VM_XIMPORT, ipl);
}

void
vmem_destroy(vmem_t *vm)
{

#if defined(_KERNEL)
	mutex_enter(&vmem_list_lock);
	LIST_REMOVE(vm, vm_alllist);
	mutex_exit(&vmem_list_lock);
#endif /* defined(_KERNEL) */

	vmem_destroy1(vm);
}

vmem_size_t
vmem_roundup_size(vmem_t *vm, vmem_size_t size)
{

	return (size + vm->vm_quantum_mask) & ~vm->vm_quantum_mask;
}

/*
 * vmem_alloc:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_alloc(vmem_t *vm, vmem_size_t size, vm_flag_t flags, vmem_addr_t *addrp)
{
	const vm_flag_t strat __unused = flags & VM_FITMASK;

	KASSERT((flags & (VM_SLEEP|VM_NOSLEEP)) != 0);
	KASSERT((~flags & (VM_SLEEP|VM_NOSLEEP)) != 0);

	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		void *p;
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		p = pool_cache_get(qc->qc_cache, vmf_to_prf(flags));
		if (addrp != NULL)
			*addrp = (vmem_addr_t)p;
		return (p == NULL) ? ENOMEM : 0;
	}
#endif /* defined(QCACHE) */

	return vmem_xalloc(vm, size, 0, 0, 0, VMEM_ADDR_MIN, VMEM_ADDR_MAX,
	    flags, addrp);
}
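/*
 * Typical unconstrained use of the API above (an illustrative sketch;
 * error handling elided, and "base"/"len" are placeholders for the
 * caller's resource range):
 *
 *	vmem_t *arena;
 *	vmem_addr_t va;
 *
 *	arena = vmem_create("example", 0, 0, PAGE_SIZE,
 *	    NULL, NULL, NULL, 0, VM_SLEEP, IPL_NONE);
 *	vmem_add(arena, base, len, VM_SLEEP);
 *	vmem_alloc(arena, 3 * PAGE_SIZE, VM_SLEEP | VM_INSTANTFIT, &va);
 *	...
 *	vmem_free(arena, va, 3 * PAGE_SIZE);
 *	vmem_destroy(arena);
 */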
int
vmem_xalloc(vmem_t *vm, const vmem_size_t size0, vmem_size_t align,
    const vmem_size_t phase, const vmem_size_t nocross,
    const vmem_addr_t minaddr, const vmem_addr_t maxaddr, const vm_flag_t flags,
    vmem_addr_t *addrp)
{
	struct vmem_freelist *list;
	struct vmem_freelist *first;
	struct vmem_freelist *end;
	bt_t *bt;
	bt_t *btnew;
	bt_t *btnew2;
	const vmem_size_t size = vmem_roundup_size(vm, size0);
	vm_flag_t strat = flags & VM_FITMASK;
	vmem_addr_t start;
	int rc;

	KASSERT(size0 > 0);
	KASSERT(size > 0);
	KASSERT(strat == VM_BESTFIT || strat == VM_INSTANTFIT);
	if ((flags & VM_SLEEP) != 0) {
		ASSERT_SLEEPABLE();
	}
	KASSERT((align & vm->vm_quantum_mask) == 0);
	KASSERT((align & (align - 1)) == 0);
	KASSERT((phase & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & vm->vm_quantum_mask) == 0);
	KASSERT((nocross & (nocross - 1)) == 0);
	KASSERT((align == 0 && phase == 0) || phase < align);
	KASSERT(nocross == 0 || nocross >= size);
	KASSERT(minaddr <= maxaddr);
	KASSERT(!VMEM_CROSS_P(phase, phase + size - 1, nocross));

	if (align == 0) {
		align = vm->vm_quantum_mask + 1;
	}

	/*
	 * allocate boundary tags before acquiring the vmem lock.
	 */
	btnew = bt_alloc(vm, flags);
	if (btnew == NULL) {
		return ENOMEM;
	}
	btnew2 = bt_alloc(vm, flags); /* XXX not necessary if no restrictions */
	if (btnew2 == NULL) {
		bt_free(vm, btnew);
		return ENOMEM;
	}

	/*
	 * choose a free block from which we allocate.
	 */
retry_strat:
	first = bt_freehead_toalloc(vm, size, strat);
	end = &vm->vm_freelist[VMEM_MAXORDER];
retry:
	bt = NULL;
	VMEM_LOCK(vm);
	vmem_check(vm);
	if (strat == VM_INSTANTFIT) {
		/*
		 * just choose the first block which satisfies our
		 * restrictions.
		 *
		 * note that we don't need to check the size of the blocks,
		 * because any block found on these lists is large enough
		 * for the requested size.
		 */
		for (list = first; list < end; list++) {
			bt = LIST_FIRST(list);
			if (bt != NULL) {
				rc = vmem_fit(bt, size, align, phase,
				    nocross, minaddr, maxaddr, &start);
				if (rc == 0) {
					goto gotit;
				}
				/*
				 * don't bother to follow the bt_freelist link
				 * here.  the list can be very long and we are
				 * told to run fast.  blocks from the later
				 * free lists are larger and have better
				 * chances to satisfy our restrictions.
				 */
			}
		}
	} else { /* VM_BESTFIT */
		/*
		 * we assume that, for space efficiency, it's better to
		 * allocate from a smaller block.  thus we start searching
		 * from a lower-order list than VM_INSTANTFIT would.
		 * however, don't bother to find the smallest block in a
		 * free list because the list can be very long.  we can
		 * revisit it if/when it turns out to be a problem.
		 *
		 * note that the 'first' list can contain blocks smaller
		 * than the requested size.  thus we need to check bt_size.
		 */
		for (list = first; list < end; list++) {
			LIST_FOREACH(bt, list, bt_freelist) {
				if (bt->bt_size >= size) {
					rc = vmem_fit(bt, size, align, phase,
					    nocross, minaddr, maxaddr, &start);
					if (rc == 0) {
						goto gotit;
					}
				}
			}
		}
	}
	VMEM_UNLOCK(vm);
#if 1
	if (strat == VM_INSTANTFIT) {
		strat = VM_BESTFIT;
		goto retry_strat;
	}
#endif
	if (align != vm->vm_quantum_mask + 1 || phase != 0 || nocross != 0) {

		/*
		 * XXX should try to import a region large enough to
		 * satisfy restrictions?
		 */

		goto fail;
	}
	/* XXX eeek, minaddr & maxaddr not respected */
	if (vmem_import(vm, size, flags) == 0) {
		goto retry;
	}
	/* XXX */

	if ((flags & VM_SLEEP) != 0) {
#if defined(_KERNEL) && !defined(_RUMPKERNEL)
		mutex_spin_enter(&uvm_fpageqlock);
		uvm_kick_pdaemon();
		mutex_spin_exit(&uvm_fpageqlock);
#endif
		VMEM_LOCK(vm);
		VMEM_CONDVAR_WAIT(vm);
		VMEM_UNLOCK(vm);
		goto retry;
	}
fail:
	bt_free(vm, btnew);
	bt_free(vm, btnew2);
	return ENOMEM;

gotit:
	KASSERT(bt->bt_type == BT_TYPE_FREE);
	KASSERT(bt->bt_size >= size);
	bt_remfree(vm, bt);
	vmem_check(vm);
	if (bt->bt_start != start) {
		btnew2->bt_type = BT_TYPE_FREE;
		btnew2->bt_start = bt->bt_start;
		btnew2->bt_size = start - bt->bt_start;
		bt->bt_start = start;
		bt->bt_size -= btnew2->bt_size;
		bt_insfree(vm, btnew2);
		bt_insseg(vm, btnew2, CIRCLEQ_PREV(bt, bt_seglist));
		btnew2 = NULL;
		vmem_check(vm);
	}
	KASSERT(bt->bt_start == start);
	if (bt->bt_size != size && bt->bt_size - size > vm->vm_quantum_mask) {
		/* split */
		btnew->bt_type = BT_TYPE_BUSY;
		btnew->bt_start = bt->bt_start;
		btnew->bt_size = size;
		bt->bt_start = bt->bt_start + size;
		bt->bt_size -= size;
		bt_insfree(vm, bt);
		bt_insseg(vm, btnew, CIRCLEQ_PREV(bt, bt_seglist));
		bt_insbusy(vm, btnew);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
	} else {
		bt->bt_type = BT_TYPE_BUSY;
		bt_insbusy(vm, bt);
		vmem_check(vm);
		VMEM_UNLOCK(vm);
		bt_free(vm, btnew);
		btnew = bt;
	}
	if (btnew2 != NULL) {
		bt_free(vm, btnew2);
	}
	KASSERT(btnew->bt_size >= size);
	btnew->bt_type = BT_TYPE_BUSY;

	if (addrp != NULL)
		*addrp = btnew->bt_start;
	return 0;
}
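/*
 * To summarize the fallback chain above: a VM_INSTANTFIT search that finds
 * nothing is retried as VM_BESTFIT (the cheap scan looks only at list
 * heads, so it can miss usable blocks); if that also fails, an
 * unconstrained request tries to import a new span from the parent arena,
 * and a VM_SLEEP caller finally kicks the pagedaemon and waits on the
 * arena's condvar, to be woken by vmem_xfree(), before retrying.
 */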
/*
 * vmem_free:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

void
vmem_free(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{

	KASSERT(size > 0);

#if defined(QCACHE)
	if (size <= vm->vm_qcache_max) {
		int qidx = (size + vm->vm_quantum_mask) >> vm->vm_quantum_shift;
		qcache_t *qc = vm->vm_qcache[qidx - 1];

		pool_cache_put(qc->qc_cache, (void *)addr);
		return;
	}
#endif /* defined(QCACHE) */

	vmem_xfree(vm, addr, size);
}

void
vmem_xfree(vmem_t *vm, vmem_addr_t addr, vmem_size_t size)
{
	bt_t *bt;
	bt_t *t;
	LIST_HEAD(, vmem_btag) tofree;

	LIST_INIT(&tofree);

	KASSERT(size > 0);

	VMEM_LOCK(vm);

	bt = bt_lookupbusy(vm, addr);
	KASSERT(bt != NULL);
	KASSERT(bt->bt_start == addr);
	KASSERT(bt->bt_size == vmem_roundup_size(vm, size) ||
	    bt->bt_size - vmem_roundup_size(vm, size) <= vm->vm_quantum_mask);
	KASSERT(bt->bt_type == BT_TYPE_BUSY);
	bt_rembusy(vm, bt);
	bt->bt_type = BT_TYPE_FREE;

	/* coalesce */
	t = CIRCLEQ_NEXT(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(bt) < t->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}
	t = CIRCLEQ_PREV(bt, bt_seglist);
	if (t != NULL && t->bt_type == BT_TYPE_FREE) {
		KASSERT(BT_END(t) < bt->bt_start);	/* YYY */
		bt_remfree(vm, t);
		bt_remseg(vm, t);
		bt->bt_size += t->bt_size;
		bt->bt_start = t->bt_start;
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
	}

	t = CIRCLEQ_PREV(bt, bt_seglist);
	KASSERT(t != NULL);
	KASSERT(BT_ISSPAN_P(t) || t->bt_type == BT_TYPE_BUSY);
	if (vm->vm_releasefn != NULL && t->bt_type == BT_TYPE_SPAN &&
	    t->bt_size == bt->bt_size) {
		vmem_addr_t spanaddr;
		vmem_size_t spansize;

		KASSERT(t->bt_start == bt->bt_start);
		spanaddr = bt->bt_start;
		spansize = bt->bt_size;
		bt_remseg(vm, bt);
		LIST_INSERT_HEAD(&tofree, bt, bt_freelist);
		bt_remseg(vm, t);
		LIST_INSERT_HEAD(&tofree, t, bt_freelist);
		vm->vm_size -= spansize;
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
		(*vm->vm_releasefn)(vm->vm_arg, spanaddr, spansize);
	} else {
		bt_insfree(vm, bt);
		VMEM_CONDVAR_BROADCAST(vm);
		VMEM_UNLOCK(vm);
	}

	while (!LIST_EMPTY(&tofree)) {
		t = LIST_FIRST(&tofree);
		LIST_REMOVE(t, bt_freelist);
		bt_free(vm, t);
	}
}
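/*
 * Note the order of operations in vmem_xfree(): the freed segment is first
 * merged with free neighbors in the segment list, and only if the result
 * covers an entire imported (BT_TYPE_SPAN) span is the memory returned to
 * the parent arena through vm_releasefn.  Tags are collected on the local
 * "tofree" list and recycled after the arena lock is dropped, because
 * bt_free() takes the same lock.
 */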
/*
 * vmem_add:
 *
 * => caller must ensure appropriate spl,
 *    if the arena can be accessed from interrupt context.
 */

int
vmem_add(vmem_t *vm, vmem_addr_t addr, vmem_size_t size, vm_flag_t flags)
{

	return vmem_add1(vm, addr, size, flags, BT_TYPE_SPAN_STATIC);
}

/*
 * vmem_size: report the size of the arena.
 *
 * => returns the free and/or allocated size of the arena,
 *    depending on typemask.
 */
vmem_size_t
vmem_size(vmem_t *vm, int typemask)
{

	switch (typemask) {
	case VMEM_ALLOC:
		return vm->vm_inuse;
	case VMEM_FREE:
		return vm->vm_size - vm->vm_inuse;
	case VMEM_FREE|VMEM_ALLOC:
		return vm->vm_size;
	default:
		panic("vmem_size");
	}
}

/* ---- rehash */

#if defined(_KERNEL)
static struct callout vmem_rehash_ch;
static int vmem_rehash_interval;
static struct workqueue *vmem_rehash_wq;
static struct work vmem_rehash_wk;

static void
vmem_rehash_all(struct work *wk, void *dummy)
{
	vmem_t *vm;

	KASSERT(wk == &vmem_rehash_wk);
	mutex_enter(&vmem_list_lock);
	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		size_t desired;
		size_t current;

		if (!VMEM_TRYLOCK(vm)) {
			continue;
		}
		desired = vm->vm_nbusytag;
		current = vm->vm_hashsize;
		VMEM_UNLOCK(vm);

		if (desired > VMEM_HASHSIZE_MAX) {
			desired = VMEM_HASHSIZE_MAX;
		} else if (desired < VMEM_HASHSIZE_MIN) {
			desired = VMEM_HASHSIZE_MIN;
		}
		if (desired > current * 2 || desired * 2 < current) {
			vmem_rehash(vm, desired, VM_NOSLEEP);
		}
	}
	mutex_exit(&vmem_list_lock);

	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}

static void
vmem_rehash_all_kick(void *dummy)
{

	workqueue_enqueue(vmem_rehash_wq, &vmem_rehash_wk, NULL);
}

void
vmem_rehash_start(void)
{
	int error;

	error = workqueue_create(&vmem_rehash_wq, "vmem_rehash",
	    vmem_rehash_all, NULL, PRI_VM, IPL_SOFTCLOCK, WQ_MPSAFE);
	if (error) {
		panic("%s: workqueue_create %d\n", __func__, error);
	}
	callout_init(&vmem_rehash_ch, CALLOUT_MPSAFE);
	callout_setfunc(&vmem_rehash_ch, vmem_rehash_all_kick, NULL);

	vmem_rehash_interval = hz * 10;
	callout_schedule(&vmem_rehash_ch, vmem_rehash_interval);
}
#endif /* defined(_KERNEL) */
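/*
 * The periodic rehash above aims to keep the busy-tag hash load factor
 * near 1: every 10 seconds (hz * 10 callout ticks), each arena whose
 * busy-tag count has drifted outside [hashsize / 2, hashsize * 2] gets a
 * table sized to the current tag count, clamped to
 * [VMEM_HASHSIZE_MIN, VMEM_HASHSIZE_MAX].
 */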
/* ---- debug */

#if defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY)

static void bt_dump(const bt_t *, void (*)(const char *, ...));

static const char *
bt_type_string(int type)
{
	static const char * const table[] = {
		[BT_TYPE_BUSY] = "busy",
		[BT_TYPE_FREE] = "free",
		[BT_TYPE_SPAN] = "span",
		[BT_TYPE_SPAN_STATIC] = "static span",
	};

	if (type >= __arraycount(table)) {
		return "BOGUS";
	}
	return table[type];
}

static void
bt_dump(const bt_t *bt, void (*pr)(const char *, ...))
{

	(*pr)("\t%p: %" PRIu64 ", %" PRIu64 ", %d(%s)\n",
	    bt, (uint64_t)bt->bt_start, (uint64_t)bt->bt_size,
	    bt->bt_type, bt_type_string(bt->bt_type));
}

static void
vmem_dump(const vmem_t *vm, void (*pr)(const char *, ...))
{
	const bt_t *bt;
	int i;

	(*pr)("vmem %p '%s'\n", vm, vm->vm_name);
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		bt_dump(bt, pr);
	}

	for (i = 0; i < VMEM_MAXORDER; i++) {
		const struct vmem_freelist *fl = &vm->vm_freelist[i];

		if (LIST_EMPTY(fl)) {
			continue;
		}

		(*pr)("freelist[%d]\n", i);
		LIST_FOREACH(bt, fl, bt_freelist) {
			bt_dump(bt, pr);
		}
	}
}

#endif /* defined(DDB) || defined(UNITTEST) || defined(VMEM_SANITY) */

#if defined(DDB)
static bt_t *
vmem_whatis_lookup(vmem_t *vm, uintptr_t addr)
{
	bt_t *bt;

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (BT_ISSPAN_P(bt)) {
			continue;
		}
		if (bt->bt_start <= addr && addr <= BT_END(bt)) {
			return bt;
		}
	}

	return NULL;
}

void
vmem_whatis(uintptr_t addr, void (*pr)(const char *, ...))
{
	vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		bt_t *bt;

		bt = vmem_whatis_lookup(vm, addr);
		if (bt == NULL) {
			continue;
		}
		(*pr)("%p is %p+%zu in VMEM '%s' (%s)\n",
		    (void *)addr, (void *)bt->bt_start,
		    (size_t)(addr - bt->bt_start), vm->vm_name,
		    (bt->bt_type == BT_TYPE_BUSY) ? "allocated" : "free");
	}
}

void
vmem_printall(const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm;

	LIST_FOREACH(vm, &vmem_list, vm_alllist) {
		vmem_dump(vm, pr);
	}
}

void
vmem_print(uintptr_t addr, const char *modif, void (*pr)(const char *, ...))
{
	const vmem_t *vm = (const void *)addr;

	vmem_dump(vm, pr);
}
#endif /* defined(DDB) */

#if defined(_KERNEL)
#define	vmem_printf printf
#else
#include <stdio.h>
#include <stdarg.h>

static void
vmem_printf(const char *fmt, ...)
{
	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}
#endif
#if defined(VMEM_SANITY)

static bool
vmem_check_sanity(vmem_t *vm)
{
	const bt_t *bt, *bt2;

	KASSERT(vm != NULL);

	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		if (bt->bt_start > BT_END(bt)) {
			printf("corrupted tag\n");
			bt_dump(bt, vmem_printf);
			return false;
		}
	}
	CIRCLEQ_FOREACH(bt, &vm->vm_seglist, bt_seglist) {
		CIRCLEQ_FOREACH(bt2, &vm->vm_seglist, bt_seglist) {
			if (bt == bt2) {
				continue;
			}
			if (BT_ISSPAN_P(bt) != BT_ISSPAN_P(bt2)) {
				continue;
			}
			if (bt->bt_start <= BT_END(bt2) &&
			    bt2->bt_start <= BT_END(bt)) {
				printf("overlapping tags\n");
				bt_dump(bt, vmem_printf);
				bt_dump(bt2, vmem_printf);
				return false;
			}
		}
	}

	return true;
}

static void
vmem_check(vmem_t *vm)
{

	if (!vmem_check_sanity(vm)) {
		panic("insanity vmem %p", vm);
	}
}

#endif /* defined(VMEM_SANITY) */

#if defined(UNITTEST)
int
main(void)
{
	int rc;
	vmem_t *vm;
	vmem_addr_t p;
	struct reg {
		vmem_addr_t p;
		vmem_size_t sz;
		bool x;
	} *reg = NULL;
	int nreg = 0;
	int nalloc = 0;
	int nfree = 0;
	vmem_size_t total = 0;
#if 1
	vm_flag_t strat = VM_INSTANTFIT;
#else
	vm_flag_t strat = VM_BESTFIT;
#endif

	vm = vmem_create("test", 0, 0, 1, NULL, NULL, NULL, 0, VM_SLEEP,
#ifdef _KERNEL
	    IPL_NONE
#else
	    0
#endif
	    );
	if (vm == NULL) {
		printf("vmem_create\n");
		exit(EXIT_FAILURE);
	}
	vmem_dump(vm, vmem_printf);

	rc = vmem_add(vm, 0, 50, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 100, 200, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 2000, 1, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 40000, 65536, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 10000, 10000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 500, 1000, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_add(vm, 0xffffff00, 0x100, VM_SLEEP);
	assert(rc == 0);
	rc = vmem_xalloc(vm, 0x101, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 50, 0, 0, 0, 0, 49, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	vmem_xfree(vm, p, 50);
	rc = vmem_xalloc(vm, 25, 0, 0, 0, 0, 24, strat|VM_SLEEP, &p);
	assert(rc == 0 && p == 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff01, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xfffffffe, strat|VM_SLEEP, &p);
	assert(rc != 0);
	rc = vmem_xalloc(vm, 0x100, 0, 0, 0,
	    0xffffff00, 0xffffffff, strat|VM_SLEEP, &p);
	assert(rc == 0);
	vmem_dump(vm, vmem_printf);
	for (;;) {
		struct reg *r;
		int t = rand() % 100;

		if (t > 45) {
			/* alloc */
			vmem_size_t sz = rand() % 500 + 1;
			bool x;
			vmem_size_t align, phase, nocross;
			vmem_addr_t minaddr, maxaddr;

			if (t > 70) {
				x = true;
				/* XXX */
				align = 1 << (rand() % 15);
				phase = rand() % 65536;
				nocross = 1 << (rand() % 15);
				if (align <= phase) {
					phase = 0;
				}
				if (VMEM_CROSS_P(phase, phase + sz - 1,
				    nocross)) {
					nocross = 0;
				}
				do {
					minaddr = rand() % 50000;
					maxaddr = rand() % 70000;
				} while (minaddr > maxaddr);
				printf("=== xalloc %" PRIu64
				    " align=%" PRIu64 ", phase=%" PRIu64
				    ", nocross=%" PRIu64 ", min=%" PRIu64
				    ", max=%" PRIu64 "\n",
				    (uint64_t)sz,
				    (uint64_t)align,
				    (uint64_t)phase,
				    (uint64_t)nocross,
				    (uint64_t)minaddr,
				    (uint64_t)maxaddr);
				rc = vmem_xalloc(vm, sz, align, phase, nocross,
				    minaddr, maxaddr, strat|VM_SLEEP, &p);
			} else {
				x = false;
				printf("=== alloc %" PRIu64 "\n", (uint64_t)sz);
				rc = vmem_alloc(vm, sz, strat|VM_SLEEP, &p);
			}
			printf("-> %" PRIu64 "\n", (uint64_t)p);
			vmem_dump(vm, vmem_printf);
			if (rc != 0) {
				if (x) {
					continue;
				}
				break;
			}
			nreg++;
			reg = realloc(reg, sizeof(*reg) * nreg);
			r = &reg[nreg - 1];
			r->p = p;
			r->sz = sz;
			r->x = x;
			total += sz;
			nalloc++;
		} else if (nreg != 0) {
			/* free */
			r = &reg[rand() % nreg];
			printf("=== free %" PRIu64 ", %" PRIu64 "\n",
			    (uint64_t)r->p, (uint64_t)r->sz);
			if (r->x) {
				vmem_xfree(vm, r->p, r->sz);
			} else {
				vmem_free(vm, r->p, r->sz);
			}
			total -= r->sz;
			vmem_dump(vm, vmem_printf);
			*r = reg[nreg - 1];
			nreg--;
			nfree++;
		}
		printf("total=%" PRIu64 "\n", (uint64_t)total);
	}
	fprintf(stderr, "total=%" PRIu64 ", nalloc=%d, nfree=%d\n",
	    (uint64_t)total, nalloc, nfree);
	exit(EXIT_SUCCESS);
}
#endif /* defined(UNITTEST) */