/*	$NetBSD: vfs_bio.c,v 1.194 2008/03/27 19:06:52 ad Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.194 2008/03/27 19:06:52 ad Exp $");

#include "fs_ffs.h"
#include "opt_bufcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/intr.h>
#include <sys/cpu.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/* Function prototypes */
struct bqueue;

static void	buf_setwm(void);
static int	buf_trim(void);
static void	*bufpool_page_alloc(struct pool *, int);
static void	bufpool_page_free(struct pool *, void *);
static buf_t	*bio_doread(struct vnode *, daddr_t, int,
		    kauth_cred_t, int);
static buf_t	*getnewbuf(int, int, int);
static int	buf_lotsfree(void);
static int	buf_canrelease(void);
static u_long	buf_mempoolidx(u_long);
static u_long	buf_roundsize(u_long);
static void	*buf_malloc(size_t);
static void	buf_mrelease(void *, size_t);
static void	binsheadfree(buf_t *, struct bqueue *);
static void	binstailfree(buf_t *, struct bqueue *);
int		count_lock_queue(void); /* XXX */
#ifdef DEBUG
static int	checkfreelist(buf_t *, struct bqueue *);
#endif
static void	biointr(void *);
static void	biodone2(buf_t *);
static void	bref(buf_t *);
static void	brele(buf_t *);

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bqueue bufqueues[BQUEUES];
const struct bio_ops *bioopsp;	/* I/O operation notification */

static kcondvar_t needbuffer_cv;

/*
 * Buffer queue lock.
 */
kmutex_t bufcache_lock;
kmutex_t buffer_lock;

/* Software ISR for completed transfers. */
static void *biodone_sih;

/* Buffer pool for I/O buffers. */
static pool_cache_t buf_cache;
static pool_cache_t bufio_cache;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 6
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 7
#else
#define NMEMPOOLS 8
#endif

#define MEMPOOL_INDEX_OFFSET 9	/* smallest pool is 512 bytes */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif
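
/*
 * Worked example (illustrative note, not part of the original source):
 * the check above pins the largest of the NMEMPOOLS power-of-two pools
 * to exactly MAXBSIZE.  For MAXBSIZE == 0x8000 (32 KB):
 *
 *	NMEMPOOLS == 7, MEMPOOL_INDEX_OFFSET == 9
 *	pool sizes: 1 << 9 .. 1 << 15 == 512, 1K, 2K, 4K, 8K, 16K, 32K
 *	check:      1 << (7 + 9 - 1) == 1 << 15 == 0x8000 == MAXBSIZE
 */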
/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

static struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(buf_map,
	    MAXBSIZE, MAXBSIZE,
	    ((flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)
	    | UVM_KMF_WIRED);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{

	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE, UVM_KMF_WIRED);
}

static struct pool_allocator bufmempool_allocator = {
	.pa_alloc = bufpool_page_alloc,
	.pa_free = bufpool_page_free,
	.pa_pagesz = MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}

static void
buf_setwm(void)
{

	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache = 15) */
#define	BUFMEM_WMSHIFT	3
#define	BUFMEM_HIWMMIN	(64 * 1024 << BUFMEM_WMSHIFT)
	if (bufmem_hiwater < BUFMEM_HIWMMIN)
		/* Ensure a reasonable minimum value */
		bufmem_hiwater = BUFMEM_HIWMMIN;
	bufmem_lowater = bufmem_hiwater >> BUFMEM_WMSHIFT;
}
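
/*
 * Worked example (illustrative note, not part of the original source):
 * with the default bufcache == 15, buf_memcalc() returns roughly 15% of
 * physical memory, so
 *
 *	bufmem_hiwater ~= 15% of RAM	(clamped to at least 512 KB,
 *					 since BUFMEM_HIWMMIN == 64K << 3)
 *	bufmem_lowater  = bufmem_hiwater >> 3 ~= 1.9% of RAM
 *
 * which is the "approx. 2% of memory" mentioned above.
 */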
#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(buf_t *bp, struct bqueue *dp)
{
	buf_t *b;

	if (!debug_verify_freelist)
		return 1;

	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
		if (b == bp)
			return 1;
	}

	return 0;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static void
binsheadfree(buf_t *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

static void
binstailfree(buf_t *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(buf_t *bp)
{
	struct bqueue *dp;
	int bqidx = bp->b_freelistindex;

	KASSERT(mutex_owned(&bufcache_lock));

	KASSERT(bqidx != -1);
	dp = &bufqueues[bqidx];
	KDASSERT(checkfreelist(bp, dp));
	KASSERT(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;

	/* For the sysctl helper. */
	if (bp == dp->bq_marker)
		dp->bq_marker = NULL;

#if defined(DIAGNOSTIC)
	bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}

/*
 * Add a reference to a buffer structure that came from buf_cache.
 */
static inline void
bref(buf_t *bp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_refcnt > 0);

	bp->b_refcnt++;
}

/*
 * Free an unused buffer structure that came from buf_cache.
 */
static inline void
brele(buf_t *bp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_refcnt > 0);

	if (bp->b_refcnt-- == 1) {
		buf_destroy(bp);
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_cache_put(buf_cache, bp);
	}
}

/*
 * note that for some ports this is used by pmap bootstrap code to
 * determine kva size.
 */
u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 * - If bufpages is specified, use that as the number
	 *   of pages.
	 *
	 * - Otherwise, use bufcache as the percentage of
	 *   physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95", bufcache);
			bufcache = 95;
		}
		n = calc_cache_size(buf_map, bufcache,
		    (buf_map != kernel_map) ? 100 : BUFCACHE_VA_MAXPCT)
		    / PAGE_SIZE;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}
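
/*
 * Worked example (illustrative note, not part of the original source):
 * on a machine with 512 MB of RAM, the defaults (bufpages == 0,
 * bufcache == 15) take the percentage path above, giving roughly
 *
 *	n = (15% of 512 MB) / PAGE_SIZE pages;  n <<= PAGE_SHIFT
 *	  ~= 76 MB of buffer memory
 *
 * Setting bufpages instead hardwires an explicit page count, and
 * bufmem_valimit (if set via buf_setvalimit()) caps the result.
 */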
/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueue *dp;
	int use_std;
	u_int i;

	mutex_init(&bufcache_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&buffer_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&needbuffer_cv, "needbuf");

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, 0, false, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	buf_setwm();

	/* On "small" machines use small pool page sizes where possible */
	use_std = (physmem < atop(16*1024*1024));

	/*
	 * Also use them on systems that can map the pool pages using
	 * a direct-mapped segment.
	 */
#ifdef PMAP_MAP_POOLPAGE
	use_std = 1;
#endif

	buf_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
	    "bufpl", NULL, IPL_SOFTBIO, NULL, NULL, NULL);
	bufio_cache = pool_cache_init(sizeof(buf_t), 0, 0, 0,
	    "biopl", NULL, IPL_BIO, NULL, NULL, NULL);

	bufmempool_allocator.pa_backingmap = buf_map;
	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		if (__predict_true(size >= 1024))
			(void)snprintf(name, 8, "buf%dk", size / 1024);
		else
			(void)snprintf(name, 8, "buf%db", size);
		pa = (size <= PAGE_SIZE && use_std)
		    ? &pool_allocator_nointr
		    : &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa, IPL_NONE);
		pool_setlowat(pp, 1);
		pool_sethiwat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
		TAILQ_INIT(&dp->bq_queue);
		dp->bq_bytes = 0;
	}

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

void
bufinit2(void)
{

	biodone_sih = softint_establish(SOFTINT_BIO | SOFTINT_MPSAFE, biointr,
	    NULL);
	if (biodone_sih == NULL)
		panic("bufinit2: can't establish soft interrupt");
}

static int
buf_lotsfree(void)
{
	int try, thresh;

	/* Always allocate if doing copy on write */
	if (curlwp->l_pflag & LP_UFSCOW)
		return 1;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation is inversely
	 * proportional to the current size of the cache, using
	 * a granularity of 16 steps.
	 */
	try = random() & 0x0000000fL;

	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
	thresh = (bufmem - bufmem_lowater) /
	    ((bufmem_hiwater - bufmem_lowater) / 16);

	if (try >= thresh)
		return 1;

	/* Otherwise don't allocate. */
	return 0;
}

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called with bufcache_lock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;

	KASSERT(mutex_owned(&bufcache_lock));

	if (bufmem < bufmem_lowater)
		return 0;

	if (bufmem > bufmem_hiwater)
		return bufmem - bufmem_hiwater;

	ninvalid += bufqueues[BQ_AGE].bq_bytes;

	pagedemand = uvmexp.freetarg - uvmexp.free;
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}

/*
 * Buffer memory allocation helper functions
 */
static u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}
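
/*
 * Worked example (illustrative note, not part of the original source):
 * buf_mempoolidx() selects the smallest power-of-two pool that fits the
 * request, counting up from 512 bytes (MEMPOOL_INDEX_OFFSET == 9):
 *
 *	buf_mempoolidx(512)  == 0	(the 512-byte pool)
 *	buf_mempoolidx(513)  == 1	(the 1 KB pool)
 *	buf_mempoolidx(4096) == 3	(the 4 KB pool)
 *
 * and buf_roundsize() reports the chosen pool's size, e.g.
 * buf_roundsize(1500) == 2048.
 */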
static void *
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	void *addr;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		mutex_enter(&bufcache_lock);
		if (buf_drain(1) > 0) {
			mutex_exit(&bufcache_lock);
			continue;
		}

		if (curlwp == uvm.pagedaemon_lwp) {
			mutex_exit(&bufcache_lock);
			return NULL;
		}

		/* Wait for buffers to arrive on the LRU queue */
		cv_timedwait(&needbuffer_cv, &bufcache_lock, hz / 4);
		mutex_exit(&bufcache_lock);
	}

	return addr;
}

static void
buf_mrelease(void *addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

/*
 * bread()/breadn() helper.
 */
static buf_t *
bio_doread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
    int async)
{
	buf_t *bp;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is BC_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_oflags, (BO_DONE | BO_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		curlwp->l_ru.ru_inblock++;
	} else if (async)
		brelse(bp, 0);

	if (vp->v_type == VBLK)
		mp = vp->v_specmountpoint;
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, kauth_cred_t cred,
    buf_t **bpp)
{
	buf_t *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, kauth_cred_t cred, buf_t **bpp)
{
	buf_t *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	mutex_enter(&bufcache_lock);
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		mutex_exit(&bufcache_lock);
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
		mutex_enter(&bufcache_lock);
	}
	mutex_exit(&bufcache_lock);

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead. Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, kauth_cred_t cred, buf_t **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}
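
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * synchronous read of one file system block.  The vnode "vp", logical
 * block number "lbn" and block size "bsize" are hypothetical.
 */
#if 0
	buf_t *bp;
	int error;

	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		brelse(bp, 0);	/* buffer is returned even on error */
		return error;
	}
	/* ... examine bp->b_data ... */
	brelse(bp, 0);		/* put it back on the free lists */
#endif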
/*
 * Block write. Described in Bach (p.56)
 */
int
bwrite(buf_t *bp)
{
	int rv, sync, wasdelayed;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		KASSERT(bp->b_objlock == &vp->v_interlock);
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	bp->b_error = 0;
	wasdelayed = ISSET(bp->b_oflags, BO_DELWRI);
	CLR(bp->b_flags, B_READ);
	if (wasdelayed) {
		mutex_enter(&bufcache_lock);
		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
		reassignbuf(bp, bp->b_vp);
		mutex_exit(&bufcache_lock);
	} else {
		curlwp->l_ru.ru_oublock++;
		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE | BO_DELWRI);
	}
	if (vp != NULL)
		vp->v_numoutput++;
	mutex_exit(bp->b_objlock);

	/* Initiate disk write. */
	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp, 0);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(buf_t *bp)
{

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));

	/* If this is a tape block, write the block now. */
	if (bdev_type(bp->b_dev) == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	KASSERT(bp->b_vp == NULL || bp->b_objlock == &bp->b_vp->v_interlock);

	if (!ISSET(bp->b_oflags, BO_DELWRI)) {
		mutex_enter(&bufcache_lock);
		mutex_enter(bp->b_objlock);
		SET(bp->b_oflags, BO_DELWRI);
		curlwp->l_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(&bufcache_lock);
	} else {
		mutex_enter(bp->b_objlock);
	}
	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_oflags, BO_DONE);
	mutex_exit(bp->b_objlock);

	brelse(bp, 0);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(buf_t *bp)
{

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}
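
/*
 * Usage sketch (illustrative, not part of the original source): the three
 * ways a dirty, BC_BUSY buffer is pushed out, matching the routines above.
 */
#if 0
	error = bwrite(bp);	/* synchronous: start I/O, wait, release */

	bdwrite(bp);		/* delayed: mark BO_DELWRI, release only */

	bawrite(bp);		/* asynchronous: start I/O, don't wait */
#endif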
/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but do not
 * release it.  Call with the buffer interlock held.
 *
 * Note: called only from biodone() through ffs softdep's io_complete()
 */
void
bdirty(buf_t *bp)
{

	KASSERT(mutex_owned(&bufcache_lock));
	KASSERT(bp->b_objlock == &bp->b_vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));
	KASSERT(ISSET(bp->b_cflags, BC_BUSY));

	CLR(bp->b_cflags, BC_AGE);

	if (!ISSET(bp->b_oflags, BO_DELWRI)) {
		SET(bp->b_oflags, BO_DELWRI);
		curlwp->l_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}


/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelsel(buf_t *bp, int set)
{
	struct bqueue *bufq;
	struct vnode *vp;

	KASSERT(mutex_owned(&bufcache_lock));

	SET(bp->b_cflags, set);

	KASSERT(ISSET(bp->b_cflags, BC_BUSY));
	KASSERT(bp->b_iodone == NULL);

	/* Wake up any processes waiting for any buffer to become free. */
	cv_signal(&needbuffer_cv);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_cflags, BC_WANTED) != 0) {
		CLR(bp->b_cflags, BC_WANTED|BC_AGE);
		cv_broadcast(&bp->b_busy);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, B_LOCKED))
		bp->b_error = 0;

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_cflags, BC_NOCACHE) || bp->b_error != 0)
		SET(bp->b_cflags, BC_INVAL);

	if (ISSET(bp->b_cflags, BC_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_cflags, BC_VFLUSH);
		if (!ISSET(bp->b_cflags, BC_INVAL|BC_AGE) &&
		    !ISSET(bp->b_flags, B_LOCKED) && bp->b_error == 0) {
			KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_cflags, BC_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (bioopsp != NULL)
			(*bioopsp->io_deallocate)(bp);

		mutex_enter(bp->b_objlock);
		CLR(bp->b_oflags, BO_DONE|BO_DELWRI);
		if ((vp = bp->b_vp) != NULL) {
			KASSERT(bp->b_objlock == &vp->v_interlock);
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
			mutex_exit(&vp->v_interlock);
		} else {
			KASSERT(bp->b_objlock == &buffer_lock);
			mutex_exit(bp->b_objlock);
		}

		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED)) {
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		} else if (!ISSET(bp->b_cflags, BC_AGE)) {
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		} else {
			/* stale but valid data */
			int has_deps;

			if (bioopsp != NULL)
				has_deps = (*bioopsp->io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}
already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_cflags, BC_AGE|BC_BUSY|BC_NOCACHE);
	CLR(bp->b_flags, B_ASYNC);

	if (bp->b_bufsize <= 0)
		brele(bp);
}
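
/*
 * Usage sketch (illustrative, not part of the original source): releasing
 * a buffer whose contents should not be cached, e.g. after a one-shot
 * read.  BC_NOCACHE makes brelsel() mark the buffer BC_INVAL, so it is
 * dissociated from its vnode and queued at the head of BQ_AGE.
 */
#if 0
	brelse(bp, BC_NOCACHE);
#endif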
void
brelse(buf_t *bp, int set)
{

	mutex_enter(&bufcache_lock);
	brelsel(bp, set);
	mutex_exit(&bufcache_lock);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
buf_t *
incore(struct vnode *vp, daddr_t blkno)
{
	buf_t *bp;

	KASSERT(mutex_owned(&bufcache_lock));

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_cflags, BC_INVAL)) {
			KASSERT(bp->b_objlock == &vp->v_interlock);
			return (bp);
		}
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks be of the correct size.
 */
buf_t *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	int err, preserve;
	buf_t *bp;

	mutex_enter(&bufcache_lock);
 loop:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		err = bbusy(bp, ((slpflag & PCATCH) != 0), slptimeo, NULL);
		if (err != 0) {
			if (err == EPASSTHROUGH)
				goto loop;
			mutex_exit(&bufcache_lock);
			return (NULL);
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_oflags, BO_DONE|BO_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL)
			goto loop;

		if (incore(vp, blkno) != NULL) {
			/* The block has come into memory in the meantime. */
			brelsel(bp, 0);
			goto loop;
		}

		LIST_INSERT_HEAD(BUFHASH(vp, blkno), bp, b_hash);
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		mutex_enter(&vp->v_interlock);
		bgetvp(vp, bp);
		mutex_exit(&vp->v_interlock);
		preserve = 0;
	}
	mutex_exit(&bufcache_lock);

	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		if (allocbuf(bp, size, preserve)) {
			mutex_enter(&bufcache_lock);
			LIST_REMOVE(bp, b_hash);
			mutex_exit(&bufcache_lock);
			brelse(bp, BC_INVAL);
			return NULL;
		}
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}
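
/*
 * Usage sketch (illustrative, not part of the original source): the
 * classic getblk() pattern, essentially what bio_doread() above does --
 * read the block only if the cached copy isn't already valid.
 */
#if 0
	bp = getblk(vp, lbn, bsize, 0, 0);
	if (!ISSET(bp->b_oflags, BO_DONE | BO_DELWRI)) {
		/* cache miss: schedule the read ourselves */
		SET(bp->b_flags, B_READ);
		VOP_STRATEGY(vp, bp);
		error = biowait(bp);
	}
#endif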
/*
 * Get an empty, disassociated buffer of given size.
 */
buf_t *
geteblk(int size)
{
	buf_t *bp;
	int error;

	mutex_enter(&bufcache_lock);
	while ((bp = getnewbuf(0, 0, 0)) == NULL)
		;

	SET(bp->b_cflags, BC_INVAL);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	mutex_exit(&bufcache_lock);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	error = allocbuf(bp, size, 0);
	KASSERT(error == 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
int
allocbuf(buf_t *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	void *addr;
	int delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return 0;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (addr == NULL)
		return ENOMEM;
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bufcache_lock)
	 */
	delta = (long)desired_size - (long)oldsize;

	mutex_enter(&bufcache_lock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (curcpu()->ci_schedstate.spc_flags &
			    SPCF_SHOULDYIELD) {
				mutex_exit(&bufcache_lock);
				preempt();
				mutex_enter(&bufcache_lock);
			}
			if (buf_trim() == 0)
				break;
		}
	}
	mutex_exit(&bufcache_lock);
	return 0;
}
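
/*
 * Usage sketch (illustrative, not part of the original source): growing
 * a buffer while keeping its contents.  With preserve != 0, allocbuf()
 * copies MIN(old, new) bytes into the new allocation; filling the newly
 * exposed tail is the caller's job.
 */
#if 0
	if (allocbuf(bp, newsize, 1) != 0)
		return ENOMEM;
	memset((char *)bp->b_data + oldsize, 0, newsize - oldsize);
#endif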
/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called with the buffer queues locked.
 * Return buffer locked.
 */
buf_t *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	buf_t *bp;
	struct vnode *vp;

 start:
	KASSERT(mutex_owned(&bufcache_lock));

	/*
	 * Get a new buffer from the pool.
	 */
	if (!from_bufq && buf_lotsfree()) {
		mutex_exit(&bufcache_lock);
		bp = pool_cache_get(buf_cache, PR_NOWAIT);
		if (bp != NULL) {
			memset((char *)bp, 0, sizeof(*bp));
			buf_init(bp);
			bp->b_dev = NODEV;
			bp->b_vnbufs.le_next = NOLIST;
			bp->b_cflags = BC_BUSY;
			bp->b_refcnt = 1;
			mutex_enter(&bufcache_lock);
#if defined(DIAGNOSTIC)
			bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
			return (bp);
		}
		mutex_enter(&bufcache_lock);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
		bremfree(bp);
	} else {
		/*
		 * XXX: !from_bufq should be removed.
		 */
		if (!from_bufq || curlwp != uvm.pagedaemon_lwp) {
			/* wait for a free buffer of any kind */
			if ((slpflag & PCATCH) != 0)
				(void)cv_timedwait_sig(&needbuffer_cv,
				    &bufcache_lock, slptimeo);
			else
				(void)cv_timedwait(&needbuffer_cv,
				    &bufcache_lock, slptimeo);
		}
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_cflags, BC_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_cflags, BC_VFLUSH);
		SET(bp->b_cflags, BC_AGE);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_cflags, BC_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_oflags, BO_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_cflags, BC_AGE);
		mutex_exit(&bufcache_lock);
		bawrite(bp);
		mutex_enter(&bufcache_lock);
		return (NULL);
	}

	vp = bp->b_vp;
	if (bioopsp != NULL)
		(*bioopsp->io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_cflags = BC_BUSY;
	bp->b_oflags = 0;
	bp->b_flags = 0;
	bp->b_dev = NODEV;
	bp->b_blkno = 0;
	bp->b_lblkno = 0;
	bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	LIST_REMOVE(bp, b_hash);

	/* Disassociate us from our vnode, if we had one... */
	if (vp != NULL) {
		mutex_enter(&vp->v_interlock);
		brelvp(bp);
		mutex_exit(&vp->v_interlock);
	}

	return (bp);
}
/*
 * Attempt to free an aged buffer off the queues.
 * Called with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
	buf_t *bp;
	long size = 0;

	KASSERT(mutex_owned(&bufcache_lock));

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	KASSERT((bp->b_cflags & BC_WANTED) == 0);
	size = bp->b_bufsize;
	bufmem -= size;
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}
	/* brelse() will return the buffer to the global buffer pool */
	brelsel(bp, 0);
	return size;
}

int
buf_drain(int n)
{
	int size = 0, sz;

	KASSERT(mutex_owned(&bufcache_lock));

	while (size < n && bufmem > bufmem_lowater) {
		sz = buf_trim();
		if (sz <= 0)
			break;
		size += sz;
	}

	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(buf_t *bp)
{

	mutex_enter(bp->b_objlock);
	while (!ISSET(bp->b_oflags, BO_DONE | BO_DELWRI))
		cv_wait(&bp->b_done, bp->b_objlock);
	mutex_exit(bp->b_objlock);

	return bp->b_error;
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(buf_t *bp)
{
	int s;

	KASSERT(!ISSET(bp->b_oflags, BO_DONE));

	if (cpu_intr_p()) {
		/* From interrupt mode: defer to a soft interrupt. */
		s = splvm();
		TAILQ_INSERT_TAIL(&curcpu()->ci_data.cpu_biodone, bp, b_actq);
		softint_schedule(biodone_sih);
		splx(s);
	} else {
		/* Process now - the buffer may be freed soon. */
		biodone2(bp);
	}
}

static void
biodone2(buf_t *bp)
{
	void (*callout)(buf_t *);

	if (bioopsp != NULL)
		(*bioopsp->io_complete)(bp);

	mutex_enter(bp->b_objlock);
	/* Note that the transfer is done. */
	if (ISSET(bp->b_oflags, BO_DONE))
		panic("biodone2 already");
	CLR(bp->b_flags, B_COWDONE);
	SET(bp->b_oflags, BO_DONE);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	/* Wake up waiting writers. */
	if (!ISSET(bp->b_flags, B_READ))
		vwakeup(bp);

	if ((callout = bp->b_iodone) != NULL) {
		/* Note callout done, then call out. */
		KERNEL_LOCK(1, NULL);		/* XXXSMP */
		bp->b_iodone = NULL;
		mutex_exit(bp->b_objlock);
		(*callout)(bp);
		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
	} else if (ISSET(bp->b_flags, B_ASYNC)) {
		/* If async, release. */
		mutex_exit(bp->b_objlock);
		brelse(bp, 0);
	} else {
		/* Otherwise just wake up waiters in biowait(). */
		cv_broadcast(&bp->b_done);
		mutex_exit(bp->b_objlock);
	}
}
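
/*
 * Usage sketch (illustrative, not part of the original source): the
 * asynchronous completion path.  When b_iodone is set, biodone2() clears
 * it and invokes it instead of waking biowait() sleepers.  The callback
 * example_iodone() is hypothetical; here it owns and frees the buffer.
 */
#if 0
static void
example_iodone(buf_t *bp)
{

	if (bp->b_error != 0)
		printf("example: I/O failed: %d\n", bp->b_error);
	putiobuf(bp);			/* bp came from getiobuf() */
}

	/* ... when issuing the I/O ... */
	bp->b_iodone = example_iodone;
	SET(bp->b_flags, B_ASYNC);
	VOP_STRATEGY(vp, bp);
#endif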
static void
biointr(void *cookie)
{
	struct cpu_info *ci;
	buf_t *bp;
	int s;

	ci = curcpu();

	while (!TAILQ_EMPTY(&ci->ci_data.cpu_biodone)) {
		KASSERT(curcpu() == ci);

		s = splvm();
		bp = TAILQ_FIRST(&ci->ci_data.cpu_biodone);
		TAILQ_REMOVE(&ci->ci_data.cpu_biodone, bp, b_actq);
		splx(s);

		biodone2(bp);
	}
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	buf_t *bp;
	int n = 0;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
		n++;
	mutex_exit(&bufcache_lock);
	return (n);
}
/*
 * Wait for all buffers to complete I/O.
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	buf_t *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		mutex_enter(&bufcache_lock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY)
					nbusy += ((bp->b_flags & B_READ) == 0);
				/*
				 * With soft updates, some buffers that are
				 * written will be remarked as dirty until other
				 * buffers are written.
				 */
				if (bp->b_vp && bp->b_vp->v_mount
				    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
				    && (bp->b_oflags & BO_DELWRI)) {
					bremfree(bp);
					bp->b_cflags |= BC_BUSY;
					nbusy++;
					mutex_exit(&bufcache_lock);
					bawrite(bp);
					if (dcount-- <= 0) {
						printf("softdep ");
						goto fail;
					}
					mutex_enter(&bufcache_lock);
				}
			}
		}
		mutex_exit(&bufcache_lock);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_cflags & (BC_BUSY|BC_INVAL)) == BC_BUSY &&
				    (bp->b_flags & B_READ) == 0)
					vprint(NULL, bp->b_vp);
			}
		}
#endif
	}

	return nbusy;
}

static void
sysctl_fillbuf(buf_t *i, struct buf_sysctl *o)
{

	o->b_flags = i->b_flags | i->b_cflags | i->b_oflags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	o->b_addr = PTRTOUINT64(i->b_data);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	o->b_iodone = PTRTOUINT64(i->b_iodone);
	o->b_proc = PTRTOUINT64(i->b_proc);
	o->b_vp = PTRTOUINT64(i->b_vp);
	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
	o->b_lblkno = i->b_lblkno;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	buf_t *bp;
	struct buf_sysctl bs;
	struct bqueue *bq;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, elem_count, retries;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 4)
		return (EINVAL);

	retries = 100;
 retry:
	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	sysctl_unlock();
	mutex_enter(&bufcache_lock);
	for (i = 0; i < BQUEUES; i++) {
		bq = &bufqueues[i];
		TAILQ_FOREACH(bp, &bq->bq_queue, b_freelist) {
			bq->bq_marker = bp;
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				mutex_exit(&bufcache_lock);
				error = copyout(&bs, dp, out_size);
				mutex_enter(&bufcache_lock);
				if (error)
					break;
				if (bq->bq_marker != bp) {
					/*
					 * This sysctl node is only for
					 * statistics.  Retry; if the
					 * queue keeps changing, then
					 * bail out.
					 */
					if (retries-- == 0) {
						error = EAGAIN;
						break;
					}
					mutex_exit(&bufcache_lock);
					goto retry;
				}
				dp += elem_size;
				len -= elem_size;
			}
			if (elem_count > 0) {
				needed += elem_size;
				if (elem_count != INT_MAX)
					elem_count--;
			}
		}
		if (error != 0)
			break;
	}
	mutex_exit(&bufcache_lock);
	sysctl_relock();

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(buf_t);

	return (error);
}
static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error, rv;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int *)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (t < 0)
		return EINVAL;
	if (rnode->sysctl_data == &bufcache) {
		if (t > 100)
			return (EINVAL);
		bufcache = t;
		buf_setwm();
	} else if (rnode->sysctl_data == &bufmem_lowater) {
		if (bufmem_hiwater - t < 16)
			return (EINVAL);
		bufmem_lowater = t;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		if (t - bufmem_lowater < 16)
			return (EINVAL);
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	sysctl_unlock();
	mutex_enter(&bufcache_lock);
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		rv = buf_drain(t / (2 * 1024));
		if (rv <= 0)
			break;
	}
	mutex_exit(&bufcache_lock);
	sysctl_relock();

	return 0;
}

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "kern", NULL,
	    NULL, 0, NULL, 0,
	    CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "buf",
	    SYSCTL_DESCR("Kernel buffer cache information"),
	    sysctl_dobuf, 0, NULL, 0,
	    CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "vm", NULL,
	    NULL, 0, NULL, 0,
	    CTL_VM, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "bufcache",
	    SYSCTL_DESCR("Percentage of physical memory to use for "
		"buffer cache"),
	    sysctl_bufvm_update, 0, &bufcache, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "bufmem",
	    SYSCTL_DESCR("Amount of kernel memory used by buffer "
		"cache"),
	    NULL, 0, &bufmem, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "bufmem_lowater",
	    SYSCTL_DESCR("Minimum amount of kernel memory to "
		"reserve for buffer cache"),
	    sysctl_bufvm_update, 0, &bufmem_lowater, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "bufmem_hiwater",
	    SYSCTL_DESCR("Maximum amount of kernel memory to use "
		"for buffer cache"),
	    sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
	    CTL_VM, CTL_CREATE, CTL_EOL);
}
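
/*
 * Usage sketch (illustrative, not part of the original source): the knobs
 * created above can be inspected and tuned from userland with sysctl(8),
 * e.g.:
 *
 *	sysctl vm.bufmem vm.bufmem_lowater vm.bufmem_hiwater
 *	sysctl -w vm.bufcache=20
 *
 * Lowering vm.bufmem_hiwater drains the cache down to the new mark via
 * sysctl_bufvm_update() above.
 */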
#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int i, j, count;
	buf_t *bp;
	struct bqueue *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static const char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */

/* ------------------------------ */

buf_t *
getiobuf(struct vnode *vp, bool waitok)
{
	buf_t *bp;

	bp = pool_cache_get(bufio_cache, (waitok ? PR_WAITOK : PR_NOWAIT));
	if (bp == NULL)
		return bp;

	buf_init(bp);

	if ((bp->b_vp = vp) == NULL)
		bp->b_objlock = &buffer_lock;
	else
		bp->b_objlock = &vp->v_interlock;

	return bp;
}

void
putiobuf(buf_t *bp)
{

	buf_destroy(bp);
	pool_cache_put(bufio_cache, bp);
}

/*
 * nestiobuf_iodone: b_iodone callback for nested buffers.
 */
void
nestiobuf_iodone(buf_t *bp)
{
	buf_t *mbp = bp->b_private;
	int error;
	int donebytes;

	KASSERT(bp->b_bcount <= bp->b_bufsize);
	KASSERT(mbp != bp);

	/* Start from the sub-buffer's own error so it isn't lost. */
	error = bp->b_error;
	if (bp->b_error == 0 &&
	    (bp->b_bcount < bp->b_bufsize || bp->b_resid > 0)) {
		/*
		 * Not all got transferred, raise an error. We have no way to
		 * propagate these conditions to mbp.
		 */
		error = EIO;
	}

	donebytes = bp->b_bufsize;

	putiobuf(bp);
	nestiobuf_done(mbp, donebytes, error);
}

/*
 * nestiobuf_setup: setup a "nested" buffer.
 *
 * => 'mbp' is a "master" buffer which is being divided into sub pieces.
 * => 'bp' should be a buffer allocated by getiobuf.
 * => 'offset' is a byte offset in the master buffer.
 * => 'size' is a size in bytes of this nested buffer.
 */
void
nestiobuf_setup(buf_t *mbp, buf_t *bp, int offset, size_t size)
{
	const int b_read = mbp->b_flags & B_READ;
	struct vnode *vp = mbp->b_vp;

	KASSERT(mbp->b_bcount >= offset + size);
	bp->b_vp = vp;
	bp->b_objlock = mbp->b_objlock;
	bp->b_cflags = BC_BUSY;
	bp->b_flags = B_ASYNC | b_read;
	bp->b_iodone = nestiobuf_iodone;
	bp->b_data = (char *)mbp->b_data + offset;
	bp->b_resid = bp->b_bcount = size;
	bp->b_bufsize = bp->b_bcount;
	bp->b_private = mbp;
	BIO_COPYPRIO(bp, mbp);
	if (!b_read && vp != NULL) {
		mutex_enter(&vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(&vp->v_interlock);
	}
}
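
/*
 * Usage sketch (illustrative, not part of the original source): splitting
 * one master buffer across two half-sized sub-transfers, as a striping
 * pseudo-device might.  The master's b_resid is assumed to have been
 * initialized to its b_bcount; each nested buffer completes through
 * nestiobuf_iodone(), which credits the master via nestiobuf_done().
 */
#if 0
	buf_t *bp1 = getiobuf(vp, true);
	buf_t *bp2 = getiobuf(vp, true);
	size_t half = mbp->b_bcount / 2;

	nestiobuf_setup(mbp, bp1, 0, half);
	nestiobuf_setup(mbp, bp2, half, mbp->b_bcount - half);
	VOP_STRATEGY(vp, bp1);
	VOP_STRATEGY(vp, bp2);
	/* biodone(mbp) fires once both sub-buffers have completed */
#endif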
/*
 * nestiobuf_done: propagate completion to the master buffer.
 *
 * => 'donebytes' specifies how many bytes in the 'mbp' is completed.
 * => 'error' is an errno(2) that 'donebytes' has been completed with.
 */
void
nestiobuf_done(buf_t *mbp, int donebytes, int error)
{

	if (donebytes == 0) {
		return;
	}
	mutex_enter(mbp->b_objlock);
	KASSERT(mbp->b_resid >= donebytes);
	mbp->b_resid -= donebytes;
	if (error)
		mbp->b_error = error;	/* don't clear an earlier error */
	if (mbp->b_resid == 0) {
		mutex_exit(mbp->b_objlock);
		biodone(mbp);
	} else
		mutex_exit(mbp->b_objlock);
}

void
buf_init(buf_t *bp)
{

	LIST_INIT(&bp->b_dep);
	cv_init(&bp->b_busy, "biolock");
	cv_init(&bp->b_done, "biowait");
	bp->b_dev = NODEV;
	bp->b_error = 0;
	bp->b_flags = 0;
	bp->b_cflags = 0;
	bp->b_oflags = 0;
	bp->b_objlock = &buffer_lock;
	bp->b_iodone = NULL;
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
}

void
buf_destroy(buf_t *bp)
{

	cv_destroy(&bp->b_done);
	cv_destroy(&bp->b_busy);
}

int
bbusy(buf_t *bp, bool intr, int timo, kmutex_t *interlock)
{
	int error;

	KASSERT(mutex_owned(&bufcache_lock));

	if ((bp->b_cflags & BC_BUSY) != 0) {
		if (curlwp == uvm.pagedaemon_lwp)
			return EDEADLK;
		bp->b_cflags |= BC_WANTED;
		bref(bp);
		if (interlock != NULL)
			mutex_exit(interlock);
		if (intr) {
			error = cv_timedwait_sig(&bp->b_busy, &bufcache_lock,
			    timo);
		} else {
			error = cv_timedwait(&bp->b_busy, &bufcache_lock,
			    timo);
		}
		brele(bp);
		if (interlock != NULL)
			mutex_enter(interlock);
		if (error != 0)
			return error;
		return EPASSTHROUGH;
	}
	bp->b_cflags |= BC_BUSY;

	return 0;
}
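
/*
 * Usage sketch (illustrative, not part of the original source): the
 * EPASSTHROUGH protocol of bbusy().  Callers must retry the lookup,
 * because the buffer may have been recycled while they slept on b_busy;
 * getblk() above uses exactly this pattern.
 */
#if 0
	mutex_enter(&bufcache_lock);
 retry:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		error = bbusy(bp, false, 0, NULL);
		if (error == EPASSTHROUGH)
			goto retry;	/* lost a race; look it up again */
	}
	mutex_exit(&bufcache_lock);
#endif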