/*	$NetBSD: vfs_bio.c,v 1.133 2004/10/03 08:30:09 enami Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.133 2004/10/03 08:30:09 enami Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 15
#endif

u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */

/* Function prototypes */
struct bqueue;

static int buf_trim(void);
static void *bufpool_page_alloc(struct pool *, int);
static void bufpool_page_free(struct pool *, void *);
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
    struct ucred *, int);
static int buf_lotsfree(void);
static int buf_canrelease(void);
static __inline u_long buf_mempoolidx(u_long);
static __inline u_long buf_roundsize(u_long);
static __inline caddr_t buf_malloc(size_t);
static void buf_mrelease(caddr_t, size_t);
static __inline void binsheadfree(struct buf *, struct bqueue *);
static __inline void binstailfree(struct buf *, struct bqueue *);
int count_lock_queue(void); /* XXX */
#ifdef DEBUG
static int checkfreelist(struct buf *, struct bqueue *);
#endif

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)
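
/*
 * Example (illustrative, not part of the original source): a cache
 * lookup for (vp, blkno) walks exactly one hash chain, as incore()
 * does below:
 *
 *	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash)
 *		if (bp->b_lblkno == blkno && bp->b_vp == vp)
 *			...
 */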

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

struct bqueue {
	TAILQ_HEAD(, buf) bq_queue;
	uint64_t bq_bytes;
} bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif

/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_kmemalloc1(buf_map,
	    uvm.kernel_object, MAXBSIZE, MAXBSIZE, UVM_UNKNOWN_OFFSET,
	    (flags & PR_WAITOK) ? 0 : UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{
	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
}

static struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}
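
/*
 * Example (illustrative only): machine-dependent startup code could cap
 * the cache's virtual-memory use before bufinit() runs, e.g.
 *
 *	if (buf_setvalimit(128 * 1024 * 1024) != 0)
 *		panic("buf_setvalimit");
 *
 * Any value below NMEMPOOLS * MAXBSIZE is rejected with EINVAL.
 */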

#ifdef DEBUG
int debug_verify_freelist = 0;
static int
checkfreelist(struct buf *bp, struct bqueue *dp)
{
	struct buf *b;

	TAILQ_FOREACH(b, &dp->bq_queue, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
static __inline void
binsheadfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_HEAD(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

static __inline void
binstailfree(struct buf *bp, struct bqueue *dp)
{

	KASSERT(bp->b_freelistindex == -1);
	TAILQ_INSERT_TAIL(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes += bp->b_bufsize;
	bp->b_freelistindex = dp - bufqueues;
}

void
bremfree(struct buf *bp)
{
	struct bqueue *dp;
	int bqidx = bp->b_freelistindex;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KASSERT(bqidx != -1);
	dp = &bufqueues[bqidx];
	KDASSERT(!debug_verify_freelist || checkfreelist(bp, dp));
	KASSERT(dp->bq_bytes >= bp->b_bufsize);
	TAILQ_REMOVE(&dp->bq_queue, bp, b_freelist);
	dp->bq_bytes -= bp->b_bufsize;
#if defined(DIAGNOSTIC)
	bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
}

u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 * - If bufpages is specified, use that as the number
	 *   of pages.
	 *
	 * - Otherwise, use bufcache as the percentage of
	 *   physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}
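
/*
 * Worked example (hypothetical numbers): with physmem = 32768 pages,
 * bufcache = 15 and PAGE_SHIFT = 12, buf_memcalc() yields
 * (32768 / 100) * 15 = 4905 pages, i.e. roughly 19 MB, further clipped
 * to bufmem_valimit if a virtual-memory limit was set.
 */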

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueue *dp;
	int use_std;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache=15) */
	bufmem_lowater = (bufmem_hiwater >> 3);
	if (bufmem_lowater < 64 * 1024)
		/* Ensure a reasonable minimum value */
		bufmem_lowater = 64 * 1024;

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
		    bufmem_valimit, VM_MAP_PAGEABLE, FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize the buffer pools.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	/* On "small" machines use small pool page sizes where possible */
	use_std = (physmem < atop(16*1024*1024));

	/*
	 * Also use them on systems that can map the pool pages using
	 * a direct-mapped segment.
	 */
#ifdef PMAP_MAP_POOLPAGE
	use_std = 1;
#endif

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && use_std)
		    ? &pool_allocator_nointr
		    : &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa);
		pool_setlowat(pp, 1);
		pool_sethiwat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
		TAILQ_INIT(&dp->bq_queue);
		dp->bq_bytes = 0;
	}

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	int try, thresh;
	struct lwp *l = curlwp;

	/* Always allocate if doing copy on write */
	if (l->l_flag & L_COWINPROGRESS)
		return 1;

	/* Always allocate if less than the low water mark. */
	if (bufmem < bufmem_lowater)
		return 1;

	/* Never allocate if greater than the high water mark. */
	if (bufmem > bufmem_hiwater)
		return 0;

	/* If there's anything on the AGE list, it should be eaten. */
	if (TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue) != NULL)
		return 0;

	/*
	 * The probability of getting a new allocation is inversely
	 * proportional to the current size of the cache, using
	 * a granularity of 16 steps.
	 */
	try = random() & 0x0000000fL;

	/* Don't use "16 * bufmem" here to avoid a 32-bit overflow. */
	thresh = bufmem / (bufmem_hiwater / 16);

	if (try >= thresh && uvmexp.free > (2 * uvmexp.freetarg))
		return 1;

	/* Otherwise don't allocate. */
	return 0;
}

/*
 * Return estimate of bytes we think need to be
 * released to help resolve low memory conditions.
 *
 * => called at splbio.
 * => called with bqueue_slock held.
 */
static int
buf_canrelease(void)
{
	int pagedemand, ninvalid = 0;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if (bufmem < bufmem_lowater)
		return 0;

	ninvalid += bufqueues[BQ_AGE].bq_bytes;

	pagedemand = uvmexp.freetarg - uvmexp.free;
	if (pagedemand < 0)
		return ninvalid;
	return MAX(ninvalid, MIN(2 * MAXBSIZE,
	    MIN((bufmem - bufmem_lowater) / 16, pagedemand * PAGE_SIZE)));
}

/*
 * Buffer memory allocation helper functions
 */
static __inline u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static __inline u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}
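
/*
 * Worked example: a request for 3000 bytes gives
 * (3000 - 1) >> MEMPOOL_INDEX_OFFSET = 2, which takes two shifts to
 * reach zero, so buf_mempoolidx() returns 2; buf_roundsize() then
 * rounds the request up to 1 << (2 + 10) = 4096 bytes, the 4k pool.
 */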

static __inline caddr_t
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO + 1),
		    "buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
}

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    int async)
{
	struct buf *bp;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct mount *mp;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		if (async)
			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
		else
			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
		VOP_STRATEGY(vp, bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	if (vp->v_type == VBLK)
		mp = vp->v_specmountpoint;
	else
		mp = vp->v_mount;

	/*
	 * Collect statistics on synchronous and asynchronous reads.
	 * Reads from block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (async == 0)
			mp->mnt_stat.f_syncreads++;
		else
			mp->mnt_stat.f_asyncreads++;
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}
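
/*
 * Example (illustrative): a file system reading logical block N while
 * prefetching the next two blocks might do
 *
 *	daddr_t rablks[2] = { N + 1, N + 2 };
 *	int rasizes[2] = { bsize, bsize };
 *	error = breadn(vp, N, bsize, rablks, rasizes, 2, cred, &bp);
 *
 * Only the first block is waited for; the read-aheads complete (and
 * are released) asynchronously via biodone().
 */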

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, struct ucred *cred, struct buf **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
	int rv, sync, wasdelayed, s;
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	if (sync)
		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
	else
		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);

	VOP_STRATEGY(vp, bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}
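
/*
 * The three write flavours, in brief: bwrite() starts the I/O at once
 * (and waits unless B_ASYNC is set), bdwrite() below just marks the
 * buffer B_DELWRI and releases it, and bawrite() is bwrite() with
 * B_ASYNC set.  As seen above, a sync write on an MNT_ASYNC mount is
 * quietly demoted to a delayed write.
 */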

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
	struct lwp *l = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}
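
/*
 * Queue selection in brelse() below, in summary:
 *
 *	invalid or empty	-> head of BQ_AGE (or freed outright)
 *	B_LOCKED		-> tail of BQ_LOCKED
 *	valid, not B_AGE	-> tail of BQ_LRU
 *	B_AGE, no dependencies	-> tail of BQ_AGE
 *	B_AGE, with softdep
 *	dependencies		-> tail of BQ_LRU (avoids livelock)
 */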

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueue *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
			    checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
	    !checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s, err;
	int preserve;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
			    "getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	BIO_SETPRIO(bp, BPRIO_DEFAULT);
	allocbuf(bp, size, 0);
	return (bp);
}
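
/*
 * Example (illustrative): code that needs scratch space not tied to a
 * vnode can use geteblk():
 *
 *	bp = geteblk(size);
 *	... fill bp->b_data, point bp->b_dev at a device, start I/O ...
 *	brelse(bp);
 *
 * The buffer is marked B_INVAL and hashed on invalhash, so it can
 * never be found by incore().
 */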

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}
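
/*
 * Worked example: growing a 4k buffer to 6k with preserve set
 * allocates from the 8k pool (buf_roundsize(6144) == 8192), copies the
 * old 4k of data, releases the old memory, and adds the 4k delta to
 * bufmem, possibly triggering the trim loop above.
 */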

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (!from_bufq && buf_lotsfree() &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		simple_lock(&bp->b_interlock);
#if defined(DIAGNOSTIC)
		bp->b_freelistindex = -1;
#endif /* defined(DIAGNOSTIC) */
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE].bq_queue)) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU].bq_queue)) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO + 1),
		    "getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
static int
buf_trim(void)
{
	struct buf *bp;
	long size = 0;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	KASSERT(!ISSET(bp->b_flags, B_WANTED));
	simple_unlock(&bp->b_interlock);
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int
buf_drain(int n)
{
	int s, size = 0;

	s = splbio();
	simple_lock(&bqueue_slock);

	/* If not asked for a specific amount, make our own estimate */
	if (n == 0)
		n = buf_canrelease();

	while (size < n && bufmem > bufmem_lowater)
		size += buf_trim();

	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}
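
/*
 * Typical pattern (cf. bread() above): start the transfer with
 * VOP_STRATEGY() and then call biowait() to sleep until biodone()
 * marks the buffer B_DONE, returning 0, EINTR, or the device's error.
 */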

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */
	BIO_SETPRIO(bp, BPRIO_DEFAULT);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ))
				    == B_BUSY)
					nbusy++;
				/*
				 * With soft updates, some buffers that are
				 * written will be remarked as dirty until
				 * other buffers are written.
				 */
				if (bp->b_vp && bp->b_vp->v_mount
				    && (bp->b_vp->v_mount->mnt_flag &
					MNT_SOFTDEP)
				    && (bp->b_flags & B_DELWRI)) {
					simple_lock(&bp->b_interlock);
					bremfree(bp);
					bp->b_flags |= B_BUSY;
					nbusy++;
					simple_unlock(&bp->b_interlock);
					simple_unlock(&bqueue_slock);
					bawrite(bp);
					if (dcount-- <= 0) {
						printf("softdep ");
						goto fail;
					}
					simple_lock(&bqueue_slock);
				}
			}
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash+1; ihash++) {
			LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
				if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ))
				    == B_BUSY)
					vprint(NULL, bp->b_vp);
			}
		}
#endif
	}

	return nbusy;
}

static void
sysctl_fillbuf(struct buf *i, struct buf_sysctl *o)
{

	o->b_flags = i->b_flags;
	o->b_error = i->b_error;
	o->b_prio = i->b_prio;
	o->b_dev = i->b_dev;
	o->b_bufsize = i->b_bufsize;
	o->b_bcount = i->b_bcount;
	o->b_resid = i->b_resid;
	o->b_addr = PTRTOUINT64(i->b_un.b_addr);
	o->b_blkno = i->b_blkno;
	o->b_rawblkno = i->b_rawblkno;
	o->b_iodone = PTRTOUINT64(i->b_iodone);
	o->b_proc = PTRTOUINT64(i->b_proc);
	o->b_vp = PTRTOUINT64(i->b_vp);
	o->b_saveaddr = PTRTOUINT64(i->b_saveaddr);
	o->b_lblkno = i->b_lblkno;
}
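
/*
 * Example (illustrative, based on how sysctl_dobuf() below parses its
 * name vector): userland would retrieve the buffer list with a
 * two-pass sysctl, first probing the size, then fetching the data:
 *
 *	int mib[6] = { CTL_KERN, KERN_BUF, KERN_BUF_ALL, KERN_BUF_ALL,
 *	    sizeof(struct buf_sysctl), INT_MAX };
 *	size_t len;
 *	sysctl(mib, 6, NULL, &len, NULL, 0);
 *	... allocate len bytes, then call again with that buffer ...
 */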

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	struct buf_sysctl bs;
	char *dp;
	u_int i, op, arg;
	size_t len, needed, elem_size, out_size;
	int error, s, elem_count;

	if (namelen == 1 && name[0] == CTL_QUERY)
		return (sysctl_query(SYSCTLFN_CALL(rnode)));

	if (namelen != 4)
		return (EINVAL);

	dp = oldp;
	len = (oldp != NULL) ? *oldlenp : 0;
	op = name[0];
	arg = name[1];
	elem_size = name[2];
	elem_count = name[3];
	out_size = MIN(sizeof(bs), elem_size);

	/*
	 * at the moment, these are just "placeholders" to make the
	 * API for retrieving kern.buf data more extensible in the
	 * future.
	 *
	 * XXX kern.buf currently has "netbsd32" issues.  hopefully
	 * these will be resolved at a later point.
	 */
	if (op != KERN_BUF_ALL || arg != KERN_BUF_ALL ||
	    elem_size < 1 || elem_count < 0)
		return (EINVAL);

	error = 0;
	needed = 0;
	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i].bq_queue, b_freelist) {
			if (len >= elem_size && elem_count > 0) {
				sysctl_fillbuf(bp, &bs);
				error = copyout(&bs, dp, out_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			if (elem_count > 0) {
				needed += elem_size;
				if (elem_count != INT_MAX)
					elem_count--;
			}
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	*oldlenp = needed;
	if (oldp == NULL)
		*oldlenp += KERN_BUFSLOP * sizeof(struct buf);

	return (error);
}

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int*)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_data == &bufcache) {
		if (t < 0 || t > 100)
			return (EINVAL);
		bufcache = t;
		bufmem_hiwater = buf_memcalc();
		bufmem_lowater = (bufmem_hiwater >> 3);
		if (bufmem_lowater < 64 * 1024)
			/* Ensure a reasonable minimum value */
			bufmem_lowater = 64 * 1024;

	} else if (rnode->sysctl_data == &bufmem_lowater) {
		bufmem_lowater = t;
	} else if (rnode->sysctl_data == &bufmem_hiwater) {
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2*1024)) <= 0)
			break;
	}

	return 0;
}
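
/*
 * Example (illustrative): an administrator can resize the cache at run
 * time, e.g. "sysctl -w vm.bufcache=20"; the handler above recomputes
 * the watermarks and drains the cache until it drops below the new
 * bufmem_hiwater.
 */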

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "buf",
		       SYSCTL_DESCR("Kernel buffer cache information"),
		       sysctl_dobuf, 0, NULL, 0,
		       CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "vm", NULL,
		       NULL, 0, NULL, 0,
		       CTL_VM, CTL_EOL);

	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufcache",
		       SYSCTL_DESCR("Percentage of kernel memory to use for "
				    "buffer cache"),
		       sysctl_bufvm_update, 0, &bufcache, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READONLY,
		       CTLTYPE_INT, "bufmem",
		       SYSCTL_DESCR("Amount of kernel memory used by buffer "
				    "cache"),
		       NULL, 0, &bufmem, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufmem_lowater",
		       SYSCTL_DESCR("Minimum amount of kernel memory to "
				    "reserve for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_lowater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "bufmem_hiwater",
		       SYSCTL_DESCR("Maximum amount of kernel memory to use "
				    "for buffer cache"),
		       sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
		       CTL_VM, CTL_CREATE, CTL_EOL);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueue *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, &dp->bq_queue, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */