vfs_bio.c revision 18291
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.98 1996/09/08 20:44:20 dyson Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * was provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 8
/*
#define NO_B_MALLOC
*/

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	/* if ((bp->b_flags & B_ASYNC) == 0) { */
	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}
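
/*
 * Implement the VOP_BWRITE interface by handing the buffer to bwrite().
 */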
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other datastructure
	 * that the filesystem needs is still in memory now, it is a good
	 * thing to do this.  Note also, that if the pageout daemon is
	 * requesting a sync -- there might not be enough memory to do
	 * the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Ordered write.
 * Start output on a buffer, but only wait for it to complete if the
 * output device cannot guarantee ordering in some other way.  Devices
 * that can perform asynchronous ordered writes will set the B_ASYNC
 * flag in their strategy routine.
 * The buffer is released when the output completes.
 */
int
bowrite(struct buf * bp)
{
	bp->b_flags |= B_ORDERED;
	return (VOP_BWRITE(bp));
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing\n");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
					    bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
						    (((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer, without the VMIO rundown done by brelse.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();


	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
		/* buffers with stale but valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
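
/*
 * Release the VM pages backing a VMIO buffer: each page is unwired and,
 * once its wire count drops to zero, cached, deactivated, or freed as
 * memory conditions dictate.  The buffer is then dissociated from its vnode.
 */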
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		while ((m->flags & PG_BUSY) || (m->busy != 0)) {
			m->flags |= PG_WANTED;
			tsleep(m, PVM, "vmiorl", 0);
		}

		vm_page_unwire(m);

		if (m->wire_count == 0) {

			if (m->flags & PG_WANTED) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}

			if (m->valid) {
				if (m->dirty == 0)
					vm_page_test_dirty(m);
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
				    (cnt.v_free_count < cnt.v_free_min)) {
					if ((m->dirty == 0) &&
					    (m->hold_count == 0) &&
					    (m->busy == 0))
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				}
			} else if ((m->hold_count == 0) &&
			    (m->busy == 0)) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) && /* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}


/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp) {
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;
	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't happen
		 * but do when filesystems don't handle the size changes correctly.)
		 * We are conservative on metadata and don't just extend the buffer
		 * but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef	PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0);
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;
		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first allocation,
			 * then we revert to the page-allocation scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
		doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {

						s = splhigh();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    ((m->queue - m->pc) == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;
			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0\n");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now. The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}
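
/*
 * Return the number of buffers currently on the locked queue.
 */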
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant?  wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
    &vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");


/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing\n");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
				    ((vm_offset_t) foff & PAGE_MASK), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
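
/*
 * Clear a buffer's data area.  For VMIO buffers the page valid bits are
 * used to skip zeroing regions that already contain valid data; non-VMIO
 * buffers are simply cleared with clrbuf().
 */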
void
vfs_bio_clrbuf(struct buf *bp) {
	int i;
	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;
			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i/DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;
				for (j = 0; j < PAGE_SIZE/DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1<<j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into and out of
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}