/*	$NetBSD: vfs_bio.c,v 1.252 2014/09/08 22:01:24 joerg Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, and by Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * The buffer cache subsystem.
 *
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 *
 * Locking
 *
 * There are three locks:
 * - bufcache_lock: protects global buffer cache state.
 * - BC_BUSY: a long term per-buffer lock.
 * - buf_t::b_objlock: lock on completion (biowait vs biodone).
 *
 * For buffers associated with vnodes (the most common case) b_objlock
 * points to the vnode_t::v_interlock.  Otherwise, it points to the
 * generic buffer_lock.
 *
 * Lock order:
 *	bufcache_lock ->
 *		buf_t::b_objlock
 */
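/*
 * Illustrative sketch of the lock order above (condensed from the
 * delayed-write path in bwrite() below; error handling omitted).
 * Both locks are taken when a buffer moves between vnode queues:
 *
 *	mutex_enter(&bufcache_lock);
 *	mutex_enter(bp->b_objlock);
 *	CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
 *	reassignbuf(bp, bp->b_vp);
 *	mutex_exit(&bufcache_lock);
 *	...
 *	mutex_exit(bp->b_objlock);
 *
 * Acquiring bufcache_lock while holding a b_objlock would invert the
 * documented order and risk deadlock.
 */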
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.252 2014/09/08 22:01:24 joerg Exp $");

#include "opt_bufcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/intr.h>
#include <sys/cpu.h>
#include <sys/wapbl.h>
#include <sys/bitops.h>
#include <sys/cprng.h>

#include <uvm/uvm.h>	/* extern struct uvm uvm */

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* desired number of buffer headers */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/* Function prototypes */
struct bqueue;

static void	buf_setwm(void);
static int	buf_trim(void);
static void	*bufpool_page_alloc(struct pool *, int);
static void	bufpool_page_free(struct pool *, void *);
static buf_t	*bio_doread(struct vnode *, daddr_t, int, kauth_cred_t, int);
static buf_t	*getnewbuf(int, int, int);
static int	buf_lotsfree(void);
static int	buf_canrelease(void);
static u_long	buf_mempoolidx(u_long);
static u_long	buf_roundsize(u_long);
static void	*buf_alloc(size_t);
static void	buf_mrelease(void *, size_t);
static void	binsheadfree(buf_t *, struct bqueue *);
static void	binstailfree(buf_t *, struct bqueue *);
#ifdef DEBUG
static int	checkfreelist(buf_t *, struct bqueue *, int);
#endif
static void	biointr(void *);
static void	biodone2(buf_t *);
static void	bref(buf_t *);
static void	brele(buf_t *);
static void	sysctl_kern_buf_setup(void);
static void	sysctl_vm_buf_setup(void);

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bqueue bufqueues[BQUEUES];

static kcondvar_t needbuffer_cv;

/*
 * Buffer queue lock.
 */
kmutex_t bufcache_lock;
kmutex_t buffer_lock;

/* Software ISR for completed transfers. */
static void *biodone_sih;

/* Buffer pool for I/O buffers. */
static pool_cache_t buf_cache;
static pool_cache_t bufio_cache;

#define	MEMPOOL_INDEX_OFFSET	(ilog2(DEV_BSIZE))	/* smallest pool is 512 bytes */
#define	NMEMPOOLS		(ilog2(MAXBSIZE) - MEMPOOL_INDEX_OFFSET + 1)

__CTASSERT((1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) == MAXBSIZE);

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

static struct vm_map *buf_map;
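/*
 * Worked example of the pool sizing above, assuming the common values
 * DEV_BSIZE = 512 and MAXBSIZE = 64k (both are configuration-dependent):
 *
 *	MEMPOOL_INDEX_OFFSET = ilog2(512) = 9
 *	NMEMPOOLS = ilog2(65536) - 9 + 1 = 16 - 9 + 1 = 8
 *
 * giving eight power-of-two pools of 512, 1k, 2k, 4k, 8k, 16k, 32k and
 * 64k bytes; buf_mempoolidx() below maps a request size to the smallest
 * pool that fits it.
 */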
/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(buf_map,
	    MAXBSIZE, MAXBSIZE,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT|UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{

	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
}

static struct pool_allocator bufmempool_allocator = {
	.pa_alloc = bufpool_page_alloc,
	.pa_free = bufpool_page_free,
	.pa_pagesz = MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}

static void
buf_setwm(void)
{

	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache = 15) */
#define	BUFMEM_WMSHIFT		3
#define	BUFMEM_HIWMMIN		(64 * 1024 << BUFMEM_WMSHIFT)
	if (bufmem_hiwater < BUFMEM_HIWMMIN)
		/* Ensure a reasonable minimum value */
		bufmem_hiwater = BUFMEM_HIWMMIN;
	bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
}
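/*
 * Worked example of the watermark arithmetic above, for a hypothetical
 * machine where buf_memcalc() yields 15% of 1 GiB, roughly 154 MiB:
 *
 *	bufmem_hiwater = ~154 MiB
 *	bufmem_lowater = bufmem_hiwater >> 3 = ~19 MiB
 *
 * i.e. about 1.9% of physical memory, matching the "approx. 2%" note
 * above.  BUFMEM_HIWMMIN pins the high watermark at no less than
 * 512 KiB (64 KiB << 3) on very small machines.
 */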
#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(buf_t *bp, struct bqueue *dp, int ison)
{
	buf_t *b;

	if (!debug_verify_freelist)
		return 1;

	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
		if (b == bp)
			return ison ? 1 : 0;
	}

	return ison ? 0 : 1;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static void
binsheadfree(buf_t *bp, struct bqueue *dp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

static void
binstailfree(buf_t *bp, struct bqueue *dp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(buf_t *bp)
{
	struct bqueue *dp;
	int bqidx = bp->b_freelistindex;

	KASSERT(mutex_owned(&bufcache_lock));

	KASSERT(bqidx != -1);
	dp = &bufqueues[bqidx];
	KDASSERT(checkfreelist(bp, dp, 1));
	KASSERT(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;

	/* For the sysctl helper. */
	if (bp == dp->bq_marker)
		dp->bq_marker = NULL;

#if defined(DIAGNOSTIC)
	bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * Add a reference to a buffer structure that came from buf_cache.
 */
static inline void
bref(buf_t *bp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_refcnt > 0);

	bp->b_refcnt++;
}

/*
 * Free an unused buffer structure that came from buf_cache.
 */
static inline void
brele(buf_t *bp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_refcnt > 0);

	if (bp->b_refcnt-- == 1) {
		buf_destroy(bp);
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_cache_put(buf_cache, bp);
	}
}

/*
 * note that for some ports this is used by pmap bootstrap code to
 * determine kva size.
 */
u_long
buf_memcalc(void)
{
	u_long n;
	vsize_t mapsz = 0;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 *	- If bufpages is specified, use that as the number
	 *	  of pages.
	 *
	 *	- Otherwise, use bufcache as the percentage of
	 *	  physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95", bufcache);
			bufcache = 95;
		}
		if (buf_map != NULL)
			mapsz = vm_map_max(buf_map) - vm_map_min(buf_map);
		n = calc_cache_size(mapsz, bufcache,
		    (buf_map != kernel_map) ? 100 : BUFCACHE_VA_MAXPCT)
		    / PAGE_SIZE;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}
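/*
 * Illustrative sketch of how the two buf_memcalc() inputs interact,
 * with made-up numbers: booting with bufpages = 4096 on a 4 KiB page
 * system fixes the limit at 4096 << 12 = 16 MiB regardless of
 * bufcache; with bufpages left at 0 and bufcache at its default of 15,
 * the limit is instead 15% of physical memory, clamped by the buf_map
 * size and, if set, by bufmem_valimit.
 */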
/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueue *dp;
	int use_std;
	u_int i;
	extern void (*biodone_vfs)(buf_t *);

	biodone_vfs = biodone;

	mutex_init(&bufcache_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&buffer_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&needbuffer_cv, "needbuf");

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, 0, false, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	buf_setwm();

	/* On "small" machines use small pool page sizes where possible */
	use_std = (physmem < atop(16*1024*1024));

	/*
	 * Also use them on systems that can map the pool pages using
	 * a direct-mapped segment.
	 */
#ifdef PMAP_MAP_POOLPAGE
	use_std = 1;
#endif

	buf_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
	    "bufpl", NULL, IPL_SOFTBIO, NULL, NULL, NULL);
	bufio_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
	    "biopl", NULL, IPL_BIO, NULL, NULL, NULL);

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = kmem_alloc(8, KM_SLEEP); /* XXX: never freed */
		if (__predict_false(size >= 1048576))
			(void)snprintf(name, 8, "buf%um", size / 1048576);
		else if (__predict_true(size >= 1024))
			(void)snprintf(name, 8, "buf%uk", size / 1024);
		else
			(void)snprintf(name, 8, "buf%ub", size);
		pa = (size <= PAGE_SIZE && use_std)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE);
		pool_setlowat(pp, 1);
		pool_sethiwat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
		TAILQ_INIT(&dp->bq_queue);
		dp->bq_bytes = 0;
	}

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache.  The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, true, &bufhash);

	sysctl_kern_buf_setup();
	sysctl_vm_buf_setup();
}

void
bufinit2(void)
{

	biodone_sih = softint_establish(SOFTINT_BIO | SOFTINT_MPSAFE, biointr,
	    NULL);
	if (biodone_sih == NULL)
		panic("bufinit2: can't establish soft interrupt");
}

static int
buf_lotsfree(void)
{
	u_long guess;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation is inversely
	 * proportional to the current size of the cache above
	 * the low water mark.  Divide the total first to avoid overflows
	 * in the product.
	 */
	guess = cprng_fast32() % 16;

	if ((bufmem_hiwater - bufmem_lowater) / 16 * guess >=
	    (bufmem - bufmem_lowater))
		return 1;

	/* Otherwise don't allocate. */
	return 0;
}
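/*
 * Worked example of the probabilistic test above, with hypothetical
 * watermarks hiwater = 128 MiB and lowater = 16 MiB (range 112 MiB,
 * so one sixteenth is 7 MiB).  With bufmem at 44 MiB, 28 MiB above
 * lowater, the test "7 MiB * guess >= 28 MiB" succeeds for guess in
 * 4..15: allocation is allowed with probability 12/16.  As bufmem
 * climbs toward hiwater, fewer values of guess succeed and new
 * allocations become correspondingly rarer.
 */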
/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called with bufcache_lock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;

	KASSERT(mutex_owned(&bufcache_lock));

	if (bufmem < bufmem_lowater)
		return 0;

	if (bufmem > bufmem_hiwater)
		return bufmem - bufmem_hiwater;

	ninvalid += bufqueues[BQ_AGE].bq_bytes;

	pagedemand = uvmexp.freetarg - uvmexp.free;
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}

/*
 * Buffer memory allocation helper functions
 */
static u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}

static void *
buf_alloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	void *addr;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		mutex_enter(&bufcache_lock);
		if (buf_drain(1) > 0) {
			mutex_exit(&bufcache_lock);
			continue;
		}

		if (curlwp == uvm.pagedaemon_lwp) {
			mutex_exit(&bufcache_lock);
			return NULL;
		}

		/* Wait for buffers to arrive on the LRU queue */
		cv_timedwait(&needbuffer_cv, &bufcache_lock, hz / 4);
		mutex_exit(&bufcache_lock);
	}

	return addr;
}

static void
buf_mrelease(void *addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

/*
 * bread()/breadn() helper.
 */
static buf_t *
bio_doread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
    int async)
{
	buf_t *bp;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * getblk() may return NULL if we are the pagedaemon.
	 */
	if (bp == NULL) {
		KASSERT(curlwp == uvm.pagedaemon_lwp);
		return NULL;
	}

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is BC_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_oflags, (BO_DONE | BO_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		curlwp->l_ru.ru_inblock++;
	} else if (async)
		brelse(bp, 0);

	if (vp->v_type == VBLK)
		mp = spec_node_getmountedfs(vp);
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
    int flags, buf_t **bpp)
{
	buf_t *bp;
	int error;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
	if (bp == NULL)
		return ENOMEM;

	/* Wait for the read to complete, and return result. */
	error = biowait(bp);
	if (error == 0 && (flags & B_MODIFY) != 0)
		error = fscow_run(bp, true);
	if (error) {
		brelse(bp, 0);
		*bpp = NULL;
	}

	return error;
}
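/*
 * Typical bread() usage, sketched for a hypothetical filesystem
 * reading logical block "lbn" of vnode "vp" (error handling beyond
 * the basics omitted):
 *
 *	buf_t *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, cred, 0, &bp);
 *	if (error)
 *		return error;	// bread() already released bp
 *	// ... inspect bp->b_data ...
 *	brelse(bp, 0);		// return the buffer to the cache
 *
 * On success the caller owns the BC_BUSY buffer and must release it
 * with brelse() (or write it back with bwrite()/bdwrite()).
 */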
/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, kauth_cred_t cred, int flags, buf_t **bpp)
{
	buf_t *bp;
	int error, i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);
	if (bp == NULL)
		return ENOMEM;

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	mutex_enter(&bufcache_lock);
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		mutex_exit(&bufcache_lock);
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
		mutex_enter(&bufcache_lock);
	}
	mutex_exit(&bufcache_lock);

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	error = biowait(bp);
	if (error == 0 && (flags & B_MODIFY) != 0)
		error = fscow_run(bp, true);
	if (error) {
		brelse(bp, 0);
		*bpp = NULL;
	}

	return error;
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(buf_t *bp)
{
	int rv, sync, wasdelayed;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(!cv_has_waiters(&bp->b_done));

	vp = bp->b_vp;
	if (vp != NULL) {
		KASSERT(bp->b_objlock == vp->v_interlock);
		if (vp->v_type == VBLK)
			mp = spec_node_getmountedfs(vp);
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	if (mp && mp->mnt_wapbl) {
		if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
			bdwrite(bp);
			return 0;
		}
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	bp->b_error = 0;
	wasdelayed = ISSET(bp->b_oflags, BO_DELWRI);
	CLR(bp->b_flags, B_READ);
	if (wasdelayed) {
		mutex_enter(&bufcache_lock);
		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
		reassignbuf(bp, bp->b_vp);
		mutex_exit(&bufcache_lock);
	} else {
		curlwp->l_ru.ru_oublock++;
		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
	}
	if (vp != NULL)
		vp->v_numoutput++;
	mutex_exit(bp->b_objlock);

	/* Initiate disk write. */
	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp, 0);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}
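/*
 * The three write entry points differ only in when the data reaches
 * the disk; an illustrative summary for a BC_BUSY buffer "bp":
 *
 *	bwrite(bp);	// start I/O now; sleeps in biowait() unless
 *			// B_ASYNC is set, then releases the buffer
 *	bawrite(bp);	// sets B_ASYNC and calls VOP_BWRITE(): the
 *			// write is started but not waited for
 *	bdwrite(bp);	// marks the buffer BO_DELWRI and releases it;
 *			// the write happens later (sync, reuse, ...)
 *
 * In every case ownership of the buffer passes back to the cache.
 */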
/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(buf_t *bp)
{

	KASSERT(bp->b_vp == NULL || bp->b_vp->v_tag != VT_UFS ||
	    bp->b_vp->v_type == VBLK || ISSET(bp->b_flags, B_COWDONE));
	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(!cv_has_waiters(&bp->b_done));

	/* If this is a tape block, write the block now. */
	if (bdev_type(bp->b_dev) == D_TAPE) {
		bawrite(bp);
		return;
	}

	if (wapbl_vphaswapbl(bp->b_vp)) {
		struct mount *mp = wapbl_vptomp(bp->b_vp);

		if (bp->b_iodone != mp->mnt_wapbl_op->wo_wapbl_biodone) {
			WAPBL_ADD_BUF(mp, bp);
		}
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	KASSERT(bp->b_vp == NULL || bp->b_objlock == bp->b_vp->v_interlock);

	if (!ISSET(bp->b_oflags, BO_DELWRI)) {
		mutex_enter(&bufcache_lock);
		mutex_enter(bp->b_objlock);
		SET(bp->b_oflags, BO_DELWRI);
		curlwp->l_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(&bufcache_lock);
	} else {
		mutex_enter(bp->b_objlock);
	}
	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_oflags, BO_DONE);
	mutex_exit(bp->b_objlock);

	brelse(bp, 0);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(buf_t *bp)
{

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(bp->b_vp != NULL);

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp->b_vp, bp);
}
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelsel(buf_t *bp, int set)
{
	struct bqueue *bufq;
	struct vnode *vp;

	KASSERT(bp != NULL);
	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(!cv_has_waiters(&bp->b_done));
	KASSERT(bp->b_refcnt > 0);

	SET(bp->b_cflags, set);

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(bp->b_iodone == NULL);

	/* Wake up any processes waiting for any buffer to become free. */
	cv_signal(&needbuffer_cv);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_cflags, BC_WANTED))
		CLR(bp->b_cflags, BC_WANTED|BC_AGE);

	/* If it's clean, clear the copy-on-write flag. */
	if (ISSET(bp->b_flags, B_COWDONE)) {
		mutex_enter(bp->b_objlock);
		if (!ISSET(bp->b_oflags, BO_DELWRI))
			CLR(bp->b_flags, B_COWDONE);
		mutex_exit(bp->b_objlock);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, B_LOCKED))
		bp->b_error = 0;

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_cflags, BC_NOCACHE) || bp->b_error != 0)
		SET(bp->b_cflags, BC_INVAL);

	if (ISSET(bp->b_cflags, BC_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_cflags, BC_VFLUSH);
		if (!ISSET(bp->b_cflags, BC_INVAL|BC_AGE) &&
		    !ISSET(bp->b_flags, B_LOCKED) && bp->b_error == 0) {
			KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 1));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(checkfreelist(bp, &bufqueues[BQ_AGE], 0));
	KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU], 0));
	KDASSERT(checkfreelist(bp, &bufqueues[BQ_LOCKED], 0));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_cflags, BC_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (ISSET(bp->b_flags, B_LOCKED)) {
			if (wapbl_vphaswapbl(vp = bp->b_vp)) {
				struct mount *mp = wapbl_vptomp(vp);

				KASSERT(bp->b_iodone
				    != mp->mnt_wapbl_op->wo_wapbl_biodone);
				WAPBL_REMOVE_BUF(mp, bp);
			}
		}

		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE|BO_DELWRI);
		if ((vp = bp->b_vp) != NULL) {
			KASSERT(bp->b_objlock == vp->v_interlock);
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
			mutex_exit(vp->v_interlock);
		} else {
			KASSERT(bp->b_objlock == &buffer_lock);
			mutex_exit(bp->b_objlock);
		}

		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU.  This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED)) {
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		} else if (!ISSET(bp->b_cflags, BC_AGE)) {
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		} else {
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}
already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_cflags, BC_AGE|BC_BUSY|BC_NOCACHE);
	CLR(bp->b_flags, B_ASYNC);
	cv_broadcast(&bp->b_busy);

	if (bp->b_bufsize <= 0)
		brele(bp);
}

void
brelse(buf_t *bp, int set)
{

	mutex_enter(&bufcache_lock);
	brelsel(bp, set);
	mutex_exit(&bufcache_lock);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
buf_t *
incore(struct vnode *vp, daddr_t blkno)
{
	buf_t *bp;

	KASSERT(mutex_owned(&bufcache_lock));

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_cflags, BC_INVAL)) {
			KASSERT(bp->b_objlock == vp->v_interlock);
			return (bp);
		}
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks be of the correct size.
 */
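/*
 * Typical getblk() usage, sketched for a hypothetical metadata update
 * (locking and error paths simplified):
 *
 *	buf_t *bp;
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);	// busy buffer, or NULL
 *	if (bp == NULL)
 *		return ENOMEM;	// only the pagedaemon sees this
 *	// ... fill bp->b_data; the cache does not read it for us ...
 *	bdwrite(bp);		// or bwrite(bp) / brelse(bp, 0)
 *
 * Unlike bread(), getblk() starts no I/O: a buffer that missed the
 * cache comes back empty, and the caller supplies its contents.
 */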
buf_t *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	int err, preserve;
	buf_t *bp;

	mutex_enter(&bufcache_lock);
 loop:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		err = bbusy(bp, ((slpflag & PCATCH) != 0), slptimeo, NULL);
		if (err != 0) {
			if (err == EPASSTHROUGH)
				goto loop;
			mutex_exit(&bufcache_lock);
			return (NULL);
		}
		KASSERT(!cv_has_waiters(&bp->b_done));
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_oflags, BO_DONE|BO_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL)
			goto loop;

		if (incore(vp, blkno) != NULL) {
			/* The block has come into memory in the meantime. */
			brelsel(bp, 0);
			goto loop;
		}

		LIST_INSERT_HEAD(BUFHASH(vp, blkno), bp, b_hash);
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		mutex_enter(vp->v_interlock);
		bgetvp(vp, bp);
		mutex_exit(vp->v_interlock);
		preserve = 0;
	}
	mutex_exit(&bufcache_lock);

	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		if (allocbuf(bp, size, preserve)) {
			mutex_enter(&bufcache_lock);
			LIST_REMOVE(bp, b_hash);
			mutex_exit(&bufcache_lock);
			brelse(bp, BC_INVAL);
			return NULL;
		}
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
buf_t *
geteblk(int size)
{
	buf_t *bp;
	int error __diagused;

	mutex_enter(&bufcache_lock);
	while ((bp = getnewbuf(0, 0, 0)) == NULL)
		;

	SET(bp->b_cflags, BC_INVAL);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	mutex_exit(&bufcache_lock);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	error = allocbuf(bp, size, 0);
	KASSERT(error == 0);
	return (bp);
}
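/*
 * geteblk() is the scratch-buffer variant: a hypothetical driver
 * wanting a temporary 8 KiB staging area might do
 *
 *	buf_t *bp = geteblk(8192);	// may sleep, does not fail
 *	// ... use bp->b_data as scratch space ...
 *	brelse(bp, 0);			// BC_INVAL is set, so the memory
 *					// is recycled rather than cached
 */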
/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
int
allocbuf(buf_t *bp, int size, int preserve)
{
	void *addr;
	vsize_t oldsize, desired_size;
	int oldcount;
	int delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested");

	oldcount = bp->b_bcount;

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size) {
		/*
		 * Do not short cut the WAPBL resize, as the buffer length
		 * could still have changed and this would corrupt the
		 * tracking of the transaction length.
		 */
		goto out;
	}

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_alloc(desired_size);
	if (addr == NULL)
		return ENOMEM;
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bufcache_lock)
	 */
	delta = (long)desired_size - (long)oldsize;

	mutex_enter(&bufcache_lock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (curcpu()->ci_schedstate.spc_flags &
			    SPCF_SHOULDYIELD) {
				mutex_exit(&bufcache_lock);
				preempt();
				mutex_enter(&bufcache_lock);
			}
			if (buf_trim() == 0)
				break;
		}
	}
	mutex_exit(&bufcache_lock);

 out:
	if (wapbl_vphaswapbl(bp->b_vp))
		WAPBL_RESIZE_BUF(wapbl_vptomp(bp->b_vp), bp, oldsize, oldcount);

	return 0;
}
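/*
 * Illustrative allocbuf() resize, growing a hypothetical 2 KiB
 * directory buffer to 4 KiB while keeping its contents:
 *
 *	error = allocbuf(bp, 4096, 1);	// preserve = 1 copies old data
 *	if (error)			// ENOMEM: no pool memory was
 *		...			// available, b_data untouched
 *
 * The new 4 KiB allocation comes from the "buf4k" pool, the old 2 KiB
 * piece returns to "buf2k", and bufmem is adjusted by the difference,
 * possibly triggering the trim loop above.
 */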
/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called with the buffer queues locked.
 * Return buffer locked.
 */
buf_t *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	buf_t *bp;
	struct vnode *vp;

 start:
	KASSERT(mutex_owned(&bufcache_lock));

	/*
	 * Get a new buffer from the pool.
	 */
	if (!from_bufq && buf_lotsfree()) {
		mutex_exit(&bufcache_lock);
		bp = pool_cache_get(buf_cache, PR_NOWAIT);
		if (bp != NULL) {
			memset((char *)bp, 0, sizeof(*bp));
			buf_init(bp);
			SET(bp->b_cflags, BC_BUSY);	/* mark buffer busy */
			mutex_enter(&bufcache_lock);
#if defined(DIAGNOSTIC)
			bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
			return (bp);
		}
		mutex_enter(&bufcache_lock);
	}

	KASSERT(mutex_owned(&bufcache_lock));
	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
		KASSERT(!ISSET(bp->b_cflags, BC_BUSY) ||
		    ISSET(bp->b_cflags, BC_VFLUSH));
		bremfree(bp);

		/* Buffer is no longer on free lists. */
		SET(bp->b_cflags, BC_BUSY);
	} else {
		/*
		 * XXX: !from_bufq should be removed.
		 */
		if (!from_bufq || curlwp != uvm.pagedaemon_lwp) {
			/* wait for a free buffer of any kind */
			if ((slpflag & PCATCH) != 0)
				(void)cv_timedwait_sig(&needbuffer_cv,
				    &bufcache_lock, slptimeo);
			else
				(void)cv_timedwait(&needbuffer_cv,
				    &bufcache_lock, slptimeo);
		}
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_cflags, BC_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_cflags, BC_VFLUSH);
		SET(bp->b_cflags, BC_AGE);
		goto start;
	}

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(bp->b_refcnt > 0);
	KASSERT(!cv_has_waiters(&bp->b_done));

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_oflags, BO_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_cflags, BC_AGE);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		return (NULL);
	}

	vp = bp->b_vp;

	/* clear out various other fields */
	bp->b_cflags = BC_BUSY;
	bp->b_oflags = 0;
	bp->b_flags = 0;
	bp->b_dev = NODEV;
	bp->b_blkno = 0;
	bp->b_lblkno = 0;
	bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	LIST_REMOVE(bp, b_hash);

	/* Disassociate us from our vnode, if we had one... */
	if (vp != NULL) {
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
	}

	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
	buf_t *bp;
	long size;

	KASSERT(mutex_owned(&bufcache_lock));

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	KASSERT((bp->b_cflags & BC_WANTED) == 0);
	size = bp->b_bufsize;
	bufmem -= size;
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}
	/* brelse() will return the buffer to the global buffer pool */
	brelsel(bp, 0);
	return size;
}

int
buf_drain(int n)
{
	int size = 0, sz;

	KASSERT(mutex_owned(&bufcache_lock));

	while (size < n && bufmem > bufmem_lowater) {
		sz = buf_trim();
		if (sz <= 0)
			break;
		size += sz;
	}

	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(buf_t *bp)
{

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(bp->b_refcnt > 0);

	mutex_enter(bp->b_objlock);
	while (!ISSET(bp->b_oflags, BO_DONE | BO_DELWRI))
		cv_wait(&bp->b_done, bp->b_objlock);
	mutex_exit(bp->b_objlock);

	return bp->b_error;
}
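/*
 * The biowait()/biodone() pair is the I/O completion handshake.  A
 * minimal sketch for a hypothetical synchronous transfer (buffer and
 * flag setup omitted):
 *
 *	VOP_STRATEGY(vp, bp);	// initiator starts the I/O
 *	error = biowait(bp);	// sleeps on b_done until BO_DONE
 *
 * and, in the driver's completion path:
 *
 *	biodone(bp);		// sets BO_DONE, broadcasts b_done
 *
 * When biodone() runs in interrupt context the wakeup is deferred to
 * the soft interrupt established in bufinit2() (see biointr() below).
 */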
/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts allocated buffers on the free lists!)
 */
void
biodone(buf_t *bp)
{
	int s;

	KASSERT(!ISSET(bp->b_oflags, BO_DONE));

	if (cpu_intr_p()) {
		/* From interrupt mode: defer to a soft interrupt. */
		s = splvm();
		TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_biodone, bp, b_actq);
		softint_schedule(biodone_sih);
		splx(s);
	} else {
		/* Process now - the buffer may be freed soon. */
		biodone2(bp);
	}
}

static void
biodone2(buf_t *bp)
{
	void (*callout)(buf_t *);

	mutex_enter(bp->b_objlock);
	/* Note that the transfer is done. */
	if (ISSET(bp->b_oflags, BO_DONE))
		panic("biodone2 already");
	CLR(bp->b_flags, B_COWDONE);
	SET(bp->b_oflags, BO_DONE);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	/* Wake up waiting writers. */
	if (!ISSET(bp->b_flags, B_READ))
		vwakeup(bp);

	if ((callout = bp->b_iodone) != NULL) {
		/* Note callout done, then call out. */
		KASSERT(!cv_has_waiters(&bp->b_done));
		KERNEL_LOCK(1, NULL);		/* XXXSMP */
		bp->b_iodone = NULL;
		mutex_exit(bp->b_objlock);
		(*callout)(bp);
		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
	} else if (ISSET(bp->b_flags, B_ASYNC)) {
		/* If async, release. */
		KASSERT(!cv_has_waiters(&bp->b_done));
		mutex_exit(bp->b_objlock);
		brelse(bp, 0);
	} else {
		/* Otherwise just wake up waiters in biowait(). */
		cv_broadcast(&bp->b_done);
		mutex_exit(bp->b_objlock);
	}
}

static void
biointr(void *cookie)
{
	struct cpu_info *ci;
	buf_t *bp;
	int s;

	ci = curcpu();

	while (!TAILQ_EMPTY(&ci->ci_data.cpu_biodone)) {
		KASSERT(curcpu() == ci);

		s = splvm();
		bp = TAILQ_FIRST(&ci->ci_data.cpu_biodone);
		TAILQ_REMOVE(&ci->ci_data.cpu_biodone, bp, b_actq);
		splx(s);

		biodone2(bp);
	}
}

/*
 * Wait for all buffers to complete I/O.
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	buf_t *bp;
	int iter, nbusy, nbusy_prev = 0, ihash;

	for (iter = 0; iter < 20;) {
		mutex_enter(&bufcache_lock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_cflags & (BC_BUSY|BC_INVAL))
				    == BC_BUSY)
					nbusy += ((bp->b_flags & B_READ) == 0);
			}
		}
		mutex_exit(&bufcache_lock);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		kpause("bflush", false, MAX(1, hz / 25 * iter), NULL);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_cflags & (BC_BUSY|BC_INVAL))
				    == BC_BUSY &&
				    (bp->b_flags & B_READ) == 0)
					vprint(NULL, bp->b_vp);
			}
		}
#endif
	}

	return nbusy;
}

static void
sysctl_fillbuf(buf_t *i, struct buf_sysctl *o)
{

	o->b_flags = i->b_flags | i->b_cflags | i->b_oflags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	o->b_addr = PTRTOUINT64(i->b_data);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	o->b_iodone = PTRTOUINT64(i->b_iodone);
	o->b_proc = PTRTOUINT64(i->b_proc);
	o->b_vp = PTRTOUINT64(i->b_vp);
	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
	o->b_lblkno = i->b_lblkno;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	buf_t *bp;
	struct buf_sysctl bs;
	struct bqueue *bq;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, elem_count, retries;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));
	if (namelen != 4)
		return (EINVAL);

	retries = 100;
 retry:
	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	sysctl_unlock();
	mutex_enter(&bufcache_lock);
	for (i = 0; i < BQUEUES; i++) {
		bq = &bufqueues[i];
		TAILQ_FOREACH(bp, &bq->bq_queue, b_freelist) {
			bq->bq_marker = bp;
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				mutex_exit(&bufcache_lock);
				error = copyout(&bs, dp, out_size);
				mutex_enter(&bufcache_lock);
				if (error)
					break;
				if (bq->bq_marker != bp) {
					/*
					 * This sysctl node is only for
					 * statistics.  Retry; if the
					 * queue keeps changing, then
					 * bail out.
					 */
					if (retries-- == 0) {
						error = EAGAIN;
						break;
					}
					mutex_exit(&bufcache_lock);
					sysctl_relock();
					goto retry;
				}
				dp += elem_size;
				len -= elem_size;
			}
			needed += elem_size;
			if (elem_count > 0 && elem_count != INT_MAX)
				elem_count--;
		}
		if (error != 0)
			break;
	}
	mutex_exit(&bufcache_lock);
	sysctl_relock();

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(buf_t);

	return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int error, rv;
	struct sysctlnode node;
	unsigned int temp_bufcache;
	unsigned long temp_water;

	/* Take a copy of the supplied node and its data */
	node = *rnode;
	if (node.sysctl_data == &bufcache) {
		node.sysctl_data = &temp_bufcache;
		temp_bufcache = *(unsigned int *)rnode->sysctl_data;
	} else {
		node.sysctl_data = &temp_water;
		temp_water = *(unsigned long *)rnode->sysctl_data;
	}

	/* Update the copy */
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_data == &bufcache) {
		if (temp_bufcache > 100)
			return (EINVAL);
		bufcache = temp_bufcache;
		buf_setwm();
	} else if (rnode->sysctl_data == &bufmem_lowater) {
		if (bufmem_hiwater - temp_water < 16)
			return (EINVAL);
		bufmem_lowater = temp_water;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		if (temp_water - bufmem_lowater < 16)
			return (EINVAL);
		bufmem_hiwater = temp_water;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	sysctl_unlock();
	mutex_enter(&bufcache_lock);
	while (bufmem > bufmem_hiwater) {
		rv = buf_drain((bufmem - bufmem_hiwater) / (2 * 1024));
		if (rv <= 0)
			break;
	}
	mutex_exit(&bufcache_lock);
	sysctl_relock();

	return 0;
}

static struct sysctllog *vfsbio_sysctllog;

static void
sysctl_kern_buf_setup(void)
{

	sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "buf",
SYSCTL_DESCR("Kernel buffer cache information"), 1813 sysctl_dobuf, 0, NULL, 0, 1814 CTL_KERN, KERN_BUF, CTL_EOL); 1815} 1816 1817static void 1818sysctl_vm_buf_setup(void) 1819{ 1820 1821 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1822 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1823 CTLTYPE_INT, "bufcache", 1824 SYSCTL_DESCR("Percentage of physical memory to use for " 1825 "buffer cache"), 1826 sysctl_bufvm_update, 0, &bufcache, 0, 1827 CTL_VM, CTL_CREATE, CTL_EOL); 1828 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1829 CTLFLAG_PERMANENT|CTLFLAG_READONLY, 1830 CTLTYPE_LONG, "bufmem", 1831 SYSCTL_DESCR("Amount of kernel memory used by buffer " 1832 "cache"), 1833 NULL, 0, &bufmem, 0, 1834 CTL_VM, CTL_CREATE, CTL_EOL); 1835 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1836 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1837 CTLTYPE_LONG, "bufmem_lowater", 1838 SYSCTL_DESCR("Minimum amount of kernel memory to " 1839 "reserve for buffer cache"), 1840 sysctl_bufvm_update, 0, &bufmem_lowater, 0, 1841 CTL_VM, CTL_CREATE, CTL_EOL); 1842 sysctl_createv(&vfsbio_sysctllog, 0, NULL, NULL, 1843 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, 1844 CTLTYPE_LONG, "bufmem_hiwater", 1845 SYSCTL_DESCR("Maximum amount of kernel memory to use " 1846 "for buffer cache"), 1847 sysctl_bufvm_update, 0, &bufmem_hiwater, 0, 1848 CTL_VM, CTL_CREATE, CTL_EOL); 1849} 1850 1851#ifdef DEBUG 1852/* 1853 * Print out statistics on the current allocation of the buffer pool. 1854 * Can be enabled to print out on every ``sync'' by setting "syncprt" 1855 * in vfs_syscalls.c using sysctl. 1856 */ 1857void 1858vfs_bufstats(void) 1859{ 1860 int i, j, count; 1861 buf_t *bp; 1862 struct bqueue *dp; 1863 int counts[(MAXBSIZE / PAGE_SIZE) + 1]; 1864 static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" }; 1865 1866 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) { 1867 count = 0; 1868 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) 1869 counts[j] = 0; 1870 TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) { 1871 counts[bp->b_bufsize/PAGE_SIZE]++; 1872 count++; 1873 } 1874 printf("%s: total-%d", bname[i], count); 1875 for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++) 1876 if (counts[j] != 0) 1877 printf(", %d-%d", j * PAGE_SIZE, counts[j]); 1878 printf("\n"); 1879 } 1880} 1881#endif /* DEBUG */ 1882 1883/* ------------------------------ */ 1884 1885buf_t * 1886getiobuf(struct vnode *vp, bool waitok) 1887{ 1888 buf_t *bp; 1889 1890 bp = pool_cache_get(bufio_cache, (waitok ? PR_WAITOK : PR_NOWAIT)); 1891 if (bp == NULL) 1892 return bp; 1893 1894 buf_init(bp); 1895 1896 if ((bp->b_vp = vp) == NULL) 1897 bp->b_objlock = &buffer_lock; 1898 else 1899 bp->b_objlock = vp->v_interlock; 1900 1901 return bp; 1902} 1903 1904void 1905putiobuf(buf_t *bp) 1906{ 1907 1908 buf_destroy(bp); 1909 pool_cache_put(bufio_cache, bp); 1910} 1911 1912/* 1913 * nestiobuf_iodone: b_iodone callback for nested buffers. 1914 */ 1915 1916void 1917nestiobuf_iodone(buf_t *bp) 1918{ 1919 buf_t *mbp = bp->b_private; 1920 int error; 1921 int donebytes; 1922 1923 KASSERT(bp->b_bcount <= bp->b_bufsize); 1924 KASSERT(mbp != bp); 1925 1926 error = bp->b_error; 1927 if (bp->b_error == 0 && 1928 (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) { 1929 /* 1930 * Not all got transfered, raise an error. We have no way to 1931 * propagate these conditions to mbp. 1932 */ 1933 error = EIO; 1934 } 1935 1936 donebytes = bp->b_bufsize; 1937 1938 putiobuf(bp); 1939 nestiobuf_done(mbp, donebytes, error); 1940} 1941 1942/* 1943 * nestiobuf_setup: setup a "nested" buffer. 
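/*
 * getiobuf()/putiobuf() hand out bare I/O descriptors that are not
 * part of the buffer cache.  A hypothetical driver issuing a one-off
 * transfer might do (setup of b_data, b_bcount, b_blkno, etc. elided):
 *
 *	buf_t *bp = getiobuf(vp, true);	// waitok: sleeps, never NULL
 *	// ... fill in b_data, b_bcount, b_blkno, flags ...
 *	VOP_STRATEGY(vp, bp);
 *	error = biowait(bp);
 *	putiobuf(bp);
 *
 * Such buffers are released with putiobuf(), never brelse().
 */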
/*
 * nestiobuf_iodone: b_iodone callback for nested buffers.
 */
void
nestiobuf_iodone(buf_t *bp)
{
	buf_t *mbp = bp->b_private;
	int error;
	int donebytes;

	KASSERT(bp->b_bcount <= bp->b_bufsize);
	KASSERT(mbp != bp);

	error = bp->b_error;
	if (bp->b_error == 0 &&
	    (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) {
		/*
		 * Not all got transferred, raise an error.  We have no way to
		 * propagate these conditions to mbp.
		 */
		error = EIO;
	}

	donebytes = bp->b_bufsize;

	putiobuf(bp);
	nestiobuf_done(mbp, donebytes, error);
}

/*
 * nestiobuf_setup: setup a "nested" buffer.
 *
 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
 * => 'bp' should be a buffer allocated by getiobuf.
 * => 'offset' is a byte offset in the master buffer.
 * => 'size' is a size in bytes of this nested buffer.
 */
void
nestiobuf_setup(buf_t *mbp, buf_t *bp, int offset, size_t size)
{
	const int b_read = mbp->b_flags & B_READ;
	struct vnode *vp = mbp->b_vp;

	KASSERT(mbp->b_bcount >= offset + size);
	bp->b_vp = vp;
	bp->b_dev = mbp->b_dev;
	bp->b_objlock = mbp->b_objlock;
	bp->b_cflags = BC_BUSY;
	bp->b_flags = B_ASYNC | b_read;
	bp->b_iodone = nestiobuf_iodone;
	bp->b_data = (char *)mbp->b_data + offset;
	bp->b_resid = bp->b_bcount = size;
	bp->b_bufsize = bp->b_bcount;
	bp->b_private = mbp;
	BIO_COPYPRIO(bp, mbp);
	if (!b_read && vp != NULL) {
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
}

/*
 * nestiobuf_done: propagate completion to the master buffer.
 *
 * => 'donebytes' specifies how many bytes of the 'mbp' have completed.
 * => 'error' is an errno(2) that 'donebytes' has been completed with.
 */
void
nestiobuf_done(buf_t *mbp, int donebytes, int error)
{

	if (donebytes == 0) {
		return;
	}
	mutex_enter(mbp->b_objlock);
	KASSERT(mbp->b_resid >= donebytes);
	mbp->b_resid -= donebytes;
	if (error)
		mbp->b_error = error;
	if (mbp->b_resid == 0) {
		if (mbp->b_error)
			mbp->b_resid = mbp->b_bcount;
		mutex_exit(mbp->b_objlock);
		biodone(mbp);
	} else
		mutex_exit(mbp->b_objlock);
}

void
buf_init(buf_t *bp)
{

	cv_init(&bp->b_busy, "biolock");
	cv_init(&bp->b_done, "biowait");
	bp->b_dev = NODEV;
	bp->b_error = 0;
	bp->b_flags = 0;
	bp->b_cflags = 0;
	bp->b_oflags = 0;
	bp->b_objlock = &buffer_lock;
	bp->b_iodone = NULL;
	bp->b_refcnt = 1;
	bp->b_dev = NODEV;
	bp->b_vnbufs.le_next = NOLIST;
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
}

void
buf_destroy(buf_t *bp)
{

	cv_destroy(&bp->b_done);
	cv_destroy(&bp->b_busy);
}

int
bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock)
{
	int error;

	KASSERT(mutex_owned(&bufcache_lock));

	if ((bp->b_cflags & BC_BUSY) != 0) {
		if (curlwp == uvm.pagedaemon_lwp)
			return EDEADLK;
		bp->b_cflags |= BC_WANTED;
		bref(bp);
		if (interlock != NULL)
			mutex_exit(interlock);
		if (intr) {
			error = cv_timedwait_sig(&bp->b_busy, &bufcache_lock,
			    timo);
		} else {
			error = cv_timedwait(&bp->b_busy, &bufcache_lock,
			    timo);
		}
		brele(bp);
		if (interlock != NULL)
			mutex_enter(interlock);
		if (error != 0)
			return error;
		return EPASSTHROUGH;
	}
	bp->b_cflags |= BC_BUSY;

	return 0;
}
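/*
 * Illustrative use of the nestiobuf facility above, splitting a
 * hypothetical 64 KiB master transfer across two 32 KiB children,
 * e.g. in a striping pseudo-device (placement logic elided):
 *
 *	mbp->b_resid = mbp->b_bcount = 65536;
 *	for (int i = 0; i < 2; i++) {
 *		buf_t *cbp = getiobuf(NULL, true);
 *		nestiobuf_setup(mbp, cbp, i * 32768, 32768);
 *		cbp->b_blkno = ...;	// per-child placement
 *		VOP_STRATEGY(vp, cbp);
 *	}
 *
 * Each child's nestiobuf_iodone() calls nestiobuf_done(), and
 * biodone(mbp) fires once mbp->b_resid reaches zero.
 */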