vfs_bio.c revision 17761
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. This work was done expressly for inclusion into FreeBSD.  Other use
 *    is allowed if this notation is included.
 * 5. Modifications may be freely made to this file if the above conditions
 *    are met.
 *
 * $Id: vfs_bio.c,v 1.95 1996/08/04 20:13:08 phk Exp $
 */

/*
 * this file contains a new buffer I/O scheme implementing a coherent
 * VM object and buffer cache scheme.  Pains have been taken to make
 * sure that the performance degradation associated with schemes such
 * as this is not realized.
 *
 * Author: John S. Dyson
 * Significant help during the development and debugging phases
 * had been provided by David Greenman, also of the FreeBSD core team.
 */

#include "opt_bounce.h"

#define VMIO
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>

#include <miscfs/specfs/specdev.h>

static void vfs_update __P((void));
static struct proc *updateproc;
static struct kproc_desc up_kp = {
	"update",
	vfs_update,
	&updateproc
};
SYSINIT_KT(update, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &up_kp)

struct buf *buf;		/* buffer header pool */
struct swqueue bswlist;

int count_lock_queue __P((void));
static void vm_hold_free_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vm_hold_load_pages(struct buf * bp, vm_offset_t from,
		vm_offset_t to);
static void vfs_clean_pages(struct buf * bp);
static void vfs_setdirty(struct buf *bp);
static void vfs_vmio_release(struct buf *bp);

int needsbuffer;

/*
 * Internal update daemon, process 3
 *	The variable vfs_update_wakeup allows for internal syncs.
 */
int vfs_update_wakeup;


/*
 * buffers base kva
 */
caddr_t buffers_kva;

/*
 * bogus page -- for I/O to/from partially complete buffers
 * this is a temporary solution to the problem, but it is not
 * really that bad.  it would be better to split the buffer
 * for input in the case of buffers partially already in memory,
 * but the code is intricate enough already.
 */
vm_page_t bogus_page;
static vm_offset_t bogus_offset;

static int bufspace, maxbufspace, vmiospace, maxvmiobufspace,
	bufmallocspace, maxbufmallocspace;

static struct bufhashhdr bufhashtbl[BUFHSZ], invalhash;
static struct bqueues bufqueues[BUFFER_QUEUES];

extern int vm_swap_size;

#define BUF_MAXUSE 8
/*
#define NO_B_MALLOC
*/

/*
 * Initialize buffer headers and related structures.
 */
void
bufinit()
{
	struct buf *bp;
	int i;

	TAILQ_INIT(&bswlist);
	LIST_INIT(&invalhash);

	/* first, make a null hash table */
	for (i = 0; i < BUFHSZ; i++)
		LIST_INIT(&bufhashtbl[i]);

	/* next, make a null set of free lists */
	for (i = 0; i < BUFFER_QUEUES; i++)
		TAILQ_INIT(&bufqueues[i]);

	buffers_kva = (caddr_t) kmem_alloc_pageable(buffer_map, MAXBSIZE * nbuf);
	/* finally, initialize each buffer header and stick on empty q */
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero(bp, sizeof *bp);
		bp->b_flags = B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_qindex = QUEUE_EMPTY;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers_kva + i * MAXBSIZE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	}
/*
 * maxbufspace is currently calculated to support all filesystem blocks
 * to be 8K.  If you happen to use a 16K filesystem, the size of the buffer
 * cache is still the same as it would be for 8K filesystems.  This
 * keeps the size of the buffer cache "in check" for big block filesystems.
 */
	maxbufspace = 2 * (nbuf + 8) * PAGE_SIZE;
/*
 * reserve 1/3 of the buffers for metadata (VDIR) which might not be VMIO'ed
 */
	maxvmiobufspace = 2 * maxbufspace / 3;
/*
 * Limit the amount of malloc memory since it is wired permanently into
 * the kernel space.  Even though this is accounted for in the buffer
 * allocation, we don't want the malloced region to grow uncontrolled.
 * The malloc scheme improves memory utilization significantly on average
 * (small) directories.
 */
	maxbufmallocspace = maxbufspace / 20;

	bogus_offset = kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	bogus_page = vm_page_alloc(kernel_object,
			((bogus_offset - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
			VM_ALLOC_NORMAL);

}

/*
 * remove the buffer from the appropriate free list
 */
void
bremfree(struct buf * bp)
{
	int s = splbio();

	if (bp->b_qindex != QUEUE_NONE) {
		TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
		bp->b_qindex = QUEUE_NONE;
	} else {
		panic("bremfree: removing a buffer when not on a queue");
	}
	splx(s);
}

/*
 * Get a buffer with the specified data.  Look in the cache first.
 */
int
bread(struct vnode * vp, daddr_t blkno, int size, struct ucred * cred,
    struct buf ** bpp)
{
	struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);
	*bpp = bp;

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		return (biowait(bp));
	}
	return (0);
}
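
/*
 * Illustrative usage sketch (not part of the original source): callers
 * such as a filesystem read path typically fetch a block with bread()
 * and release it with brelse() on both the error and the success path
 * (or bqrelse() to keep the contents cached).  "lbn" and "bsize" below
 * are placeholder names.
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	bqrelse(bp);
 */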

/*
 * Operates like bread, but also starts asynchronous I/O on
 * read-ahead blocks.
 */
int
breadn(struct vnode * vp, daddr_t blkno, int size,
    daddr_t * rablkno, int *rabsize,
    int cnt, struct ucred * cred, struct buf ** bpp)
{
	struct buf *bp, *rabp;
	int i;
	int rv = 0, readwait = 0;

	*bpp = bp = getblk(vp, blkno, size, 0, 0);

	/* if not found in cache, do some I/O */
	if ((bp->b_flags & B_CACHE) == 0) {
		if (curproc != NULL)
			curproc->p_stats->p_ru.ru_inblock++;
		bp->b_flags |= B_READ;
		bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
		if (bp->b_rcred == NOCRED) {
			if (cred != NOCRED)
				crhold(cred);
			bp->b_rcred = cred;
		}
		vfs_busy_pages(bp, 0);
		VOP_STRATEGY(bp);
		++readwait;
	}
	for (i = 0; i < cnt; i++, rablkno++, rabsize++) {
		if (inmem(vp, *rablkno))
			continue;
		rabp = getblk(vp, *rablkno, *rabsize, 0, 0);

		if ((rabp->b_flags & B_CACHE) == 0) {
			if (curproc != NULL)
				curproc->p_stats->p_ru.ru_inblock++;
			rabp->b_flags |= B_READ | B_ASYNC;
			rabp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
			if (rabp->b_rcred == NOCRED) {
				if (cred != NOCRED)
					crhold(cred);
				rabp->b_rcred = cred;
			}
			vfs_busy_pages(rabp, 0);
			VOP_STRATEGY(rabp);
		} else {
			brelse(rabp);
		}
	}

	if (readwait) {
		rv = biowait(bp);
	}
	return (rv);
}

/*
 * Write, release buffer on completion.  (Done by iodone
 * if async.)
 */
int
bwrite(struct buf * bp)
{
	int oldflags = bp->b_flags;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}
	if (!(bp->b_flags & B_BUSY))
		panic("bwrite: buffer is not busy???");

	bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
	bp->b_flags |= B_WRITEINPROG;

	if ((oldflags & (B_ASYNC|B_DELWRI)) == (B_ASYNC|B_DELWRI)) {
		reassignbuf(bp, bp->b_vp);
	}

	bp->b_vp->v_numoutput++;
	vfs_busy_pages(bp, 1);
	if (curproc != NULL)
		curproc->p_stats->p_ru.ru_oublock++;
	VOP_STRATEGY(bp);

	if ((oldflags & B_ASYNC) == 0) {
		int rtval = biowait(bp);

		if (oldflags & B_DELWRI) {
			reassignbuf(bp, bp->b_vp);
		}
		brelse(bp);
		return (rtval);
	}
	return (0);
}
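
/*
 * Default VOP_BWRITE entry point: simply hand the buffer to bwrite().
 */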
int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{
	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.  (Buffer is marked dirty).
 */
void
bdwrite(struct buf * bp)
{

	if ((bp->b_flags & B_BUSY) == 0) {
		panic("bdwrite: buffer is not busy");
	}
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;
	}
	if (bp->b_flags & B_TAPE) {
		bawrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ|B_RELBUF);
	if ((bp->b_flags & B_DELWRI) == 0) {
		bp->b_flags |= B_DONE | B_DELWRI;
		reassignbuf(bp, bp->b_vp);
	}

	/*
	 * This bmap keeps the system from needing to do the bmap later,
	 * perhaps when the system is attempting to do a sync.  Since it
	 * is likely that the indirect block -- or whatever other data
	 * structure the filesystem needs -- is still in memory now, it
	 * is a good thing to do this.  Note also that if the pageout
	 * daemon is requesting a sync, there might not be enough memory
	 * to do the bmap then...  So, this is important to do.
	 */
	if (bp->b_lblkno == bp->b_blkno) {
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	}

	/*
	 * Set the *dirty* buffer range based upon the VM system dirty pages.
	 */
	vfs_setdirty(bp);

	/*
	 * We need to do this here to satisfy the vnode_pager and the
	 * pageout daemon, so that it thinks that the pages have been
	 * "cleaned".  Note that since the pages are in a delayed write
	 * buffer -- the VFS layer "will" see that the pages get written
	 * out on the next sync, or perhaps the cluster will be completed.
	 */
	vfs_clean_pages(bp);
	bqrelse(bp);
	return;
}

/*
 * Asynchronous write.
 * Start output on a buffer, but do not wait for it to complete.
 * The buffer is released when the output completes.
 */
void
bawrite(struct buf * bp)
{
	bp->b_flags |= B_ASYNC;
	(void) VOP_BWRITE(bp);
}

/*
 * Release a buffer.
 */
void
brelse(struct buf * bp)
{
	int s;

	if (bp->b_flags & B_CLUSTER) {
		relpbuf(bp);
		return;
	}
	/* anyone need a "free" block? */
	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_flags & B_LOCKED)
		bp->b_flags &= ~B_ERROR;

	if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR)) ||
	    (bp->b_bufsize <= 0)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI | B_CACHE);
		if (((bp->b_flags & B_VMIO) == 0) && bp->b_vp) {
			if (bp->b_bufsize)
				allocbuf(bp, 0);
			brelvp(bp);
		}
	}

	/*
	 * VMIO buffer rundown.  It is not very necessary to keep a VMIO buffer
	 * constituted, so the B_INVAL flag is used to *invalidate* the buffer,
	 * but the VM object is kept around.  The B_NOCACHE flag is used to
	 * invalidate the pages in the VM object.
	 */
	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		vm_object_t obj;
		int i, resid;
		vm_page_t m;
		struct vnode *vp;
		int iototal = bp->b_bufsize;

		vp = bp->b_vp;
		if (!vp)
			panic("brelse: missing vp");

		if (bp->b_npages) {
			vm_pindex_t poff;
			obj = (vm_object_t) vp->v_object;
			if (vp->v_type == VBLK)
				foff = ((vm_ooffset_t) bp->b_lblkno) << DEV_BSHIFT;
			else
				foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
			poff = OFF_TO_IDX(foff);
			for (i = 0; i < bp->b_npages; i++) {
				m = bp->b_pages[i];
				if (m == bogus_page) {
					m = vm_page_lookup(obj, poff + i);
					if (!m) {
						panic("brelse: page missing");
					}
					bp->b_pages[i] = m;
					pmap_qenter(trunc_page(bp->b_data),
						bp->b_pages, bp->b_npages);
				}
				resid = IDX_TO_OFF(m->pindex+1) - foff;
				if (resid > iototal)
					resid = iototal;
				if (resid > 0) {
					/*
					 * Don't invalidate the page if the local machine has already
					 * modified it.  This is the lesser of two evils, and should
					 * be fixed.
					 */
					if (bp->b_flags & (B_NOCACHE | B_ERROR)) {
						vm_page_test_dirty(m);
						if (m->dirty == 0) {
							vm_page_set_invalid(m, (vm_offset_t) foff, resid);
							if (m->valid == 0)
								vm_page_protect(m, VM_PROT_NONE);
						}
					}
					if (resid >= PAGE_SIZE) {
						if ((m->valid & VM_PAGE_BITS_ALL) != VM_PAGE_BITS_ALL) {
							bp->b_flags |= B_INVAL;
						}
					} else {
						if (!vm_page_is_valid(m,
							(((vm_offset_t) bp->b_data) & PAGE_MASK), resid)) {
							bp->b_flags |= B_INVAL;
						}
					}
				}
				foff += resid;
				iototal -= resid;
			}
		}
		if (bp->b_flags & (B_INVAL | B_RELBUF))
			vfs_vmio_release(bp);
	}
	if (bp->b_qindex != QUEUE_NONE)
		panic("brelse: free buffer onto another queue???");

	/* enqueue */
	/* buffers with no memory */
	if (bp->b_bufsize == 0) {
		bp->b_qindex = QUEUE_EMPTY;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_EMPTY], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with junk contents */
	} else if (bp->b_flags & (B_ERROR | B_INVAL | B_NOCACHE | B_RELBUF)) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_HEAD(&bufqueues[QUEUE_AGE], bp, b_freelist);
		LIST_REMOVE(bp, b_hash);
		LIST_INSERT_HEAD(&invalhash, bp, b_hash);
		bp->b_dev = NODEV;
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers that are locked */
	} else if (bp->b_flags & B_LOCKED) {
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with stale but valid contents */
	} else if (bp->b_flags & B_AGE) {
		bp->b_qindex = QUEUE_AGE;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_AGE], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	/* buffers with valid and quite potentially reusable contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}

/*
 * Release a buffer without invalidating its contents -- a quicker form
 * of brelse that skips the VMIO rundown and simply requeues the buffer.
 */
void
bqrelse(struct buf * bp)
{
	int s;

	s = splbio();

	/* anyone need this block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~(B_WANTED | B_AGE);
		wakeup(bp);
	}

	if (bp->b_qindex != QUEUE_NONE)
		panic("bqrelse: free buffer onto another queue???");

	if (bp->b_flags & B_LOCKED) {
		bp->b_flags &= ~B_ERROR;
		bp->b_qindex = QUEUE_LOCKED;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LOCKED], bp, b_freelist);
	/* buffers with valid contents */
	} else {
		bp->b_qindex = QUEUE_LRU;
		TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		if (needsbuffer) {
			wakeup(&needsbuffer);
			needsbuffer = 0;
		}
	}

	/* unlock */
	bp->b_flags &= ~(B_WANTED | B_BUSY | B_ASYNC | B_NOCACHE | B_AGE | B_RELBUF);
	splx(s);
}
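
/*
 * Strip the VM pages underlying a VMIO buffer: unwire each page and,
 * depending on its validity and on memory pressure, cache, deactivate,
 * or free it; then unmap the pages from the buffer's kva, zero the
 * buffer's space accounting, and dissociate the vnode.
 */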
static void
vfs_vmio_release(bp)
	struct buf *bp;
{
	int i;
	vm_page_t m;

	for (i = 0; i < bp->b_npages; i++) {
		m = bp->b_pages[i];
		bp->b_pages[i] = NULL;
		if (m->flags & PG_WANTED) {
			m->flags &= ~PG_WANTED;
			wakeup(m);
		}
		vm_page_unwire(m);
		if (m->wire_count == 0 && (m->flags & PG_BUSY) == 0) {
			if (m->valid) {
				if (m->dirty == 0)
					vm_page_test_dirty(m);
				/*
				 * this keeps pressure off of the process memory
				 */
				if ((vm_swap_size == 0) ||
				    (cnt.v_free_count < cnt.v_free_min)) {
					if ((m->dirty == 0) &&
					    (m->hold_count == 0) &&
					    (m->flags & PG_BUSY) == 0 &&
					    (m->busy == 0))
						vm_page_cache(m);
					else
						vm_page_deactivate(m);
				}
			} else if ((m->hold_count == 0) &&
			    ((m->flags & PG_BUSY) == 0) &&
			    (m->busy == 0)) {
				vm_page_protect(m, VM_PROT_NONE);
				vm_page_free(m);
			}
		}
	}
	bufspace -= bp->b_bufsize;
	vmiospace -= bp->b_bufsize;
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	bp->b_npages = 0;
	bp->b_bufsize = 0;
	bp->b_flags &= ~B_VMIO;
	if (bp->b_vp)
		brelvp(bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
__inline struct buf *
gbincore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;
	struct bufhashhdr *bh;

	bh = BUFHASH(vp, blkno);
	bp = bh->lh_first;

	/* Search hash chain */
	while (bp != NULL) {
		/* hit */
		if (bp->b_vp == vp && bp->b_lblkno == blkno &&
		    (bp->b_flags & B_INVAL) == 0) {
			break;
		}
		bp = bp->b_hash.le_next;
	}
	return (bp);
}

/*
 * this routine implements clustered async writes for
 * clearing out B_DELWRI buffers...  This is much better
 * than the old way of writing only one buffer at a time.
 */
int
vfs_bio_awrite(struct buf * bp)
{
	int i;
	daddr_t lblkno = bp->b_lblkno;
	struct vnode *vp = bp->b_vp;
	int s;
	int ncl;
	struct buf *bpa;
	int nwritten;

	s = splbio();
	/*
	 * right now we support clustered writing only to regular files
	 */
	if ((vp->v_type == VREG) &&
	    (vp->v_mount != 0) &&	/* Only on nodes that have the size info */
	    (bp->b_flags & (B_CLUSTEROK | B_INVAL)) == B_CLUSTEROK) {
		int size;
		int maxcl;

		size = vp->v_mount->mnt_stat.f_iosize;
		maxcl = MAXPHYS / size;

		for (i = 1; i < maxcl; i++) {
			if ((bpa = gbincore(vp, lblkno + i)) &&
			    ((bpa->b_flags & (B_BUSY | B_DELWRI | B_CLUSTEROK | B_INVAL)) ==
			    (B_DELWRI | B_CLUSTEROK)) &&
			    (bpa->b_bufsize == size)) {
				if ((bpa->b_blkno == bpa->b_lblkno) ||
				    (bpa->b_blkno != bp->b_blkno + ((i * size) >> DEV_BSHIFT)))
					break;
			} else {
				break;
			}
		}
		ncl = i;
		/*
		 * this is a possible cluster write
		 */
		if (ncl != 1) {
			nwritten = cluster_wbuild(vp, size, lblkno, ncl);
			splx(s);
			return nwritten;
		}
	}
	bremfree(bp);
	splx(s);
	/*
	 * default (old) behavior, writing out only one block
	 */
	bp->b_flags |= B_BUSY | B_ASYNC;
	nwritten = bp->b_bufsize;
	(void) VOP_BWRITE(bp);
	return nwritten;
}

/*
 * Find a buffer header which is available for use.
 */
static struct buf *
getnewbuf(int slpflag, int slptimeo, int doingvmio)
{
	struct buf *bp;
	int nbyteswritten = 0;

start:
	if (bufspace >= maxbufspace)
		goto trytofreespace;

	/* can we constitute a new buffer? */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_EMPTY]))) {
		if (bp->b_qindex != QUEUE_EMPTY)
			panic("getnewbuf: inconsistent EMPTY queue, qindex=%d",
			    bp->b_qindex);
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		goto fillbuf;
	}
trytofreespace:
	/*
	 * We keep the file I/O from hogging metadata I/O
	 * This is desirable because file data is cached in the
	 * VM/Buffer cache even if a buffer is freed.
	 */
	if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_AGE]))) {
		if (bp->b_qindex != QUEUE_AGE)
			panic("getnewbuf: inconsistent AGE queue, qindex=%d",
			    bp->b_qindex);
	} else if ((bp = TAILQ_FIRST(&bufqueues[QUEUE_LRU]))) {
		if (bp->b_qindex != QUEUE_LRU)
			panic("getnewbuf: inconsistent LRU queue, qindex=%d",
			    bp->b_qindex);
	}
	if (!bp) {
		/* wait for a free buffer of any kind */
		needsbuffer = 1;
		tsleep(&needsbuffer,
		    (PRIBIO + 1) | slpflag, "newbuf", slptimeo);
		return (0);
	}

	/*
	 * We are fairly aggressive about freeing VMIO buffers, but since
	 * the buffering is intact without buffer headers, there is not
	 * much loss.  We gain by maintaining non-VMIOed metadata in buffers.
	 */
	if ((bp->b_qindex == QUEUE_LRU) && (bp->b_usecount > 0)) {
		if ((bp->b_flags & B_VMIO) == 0 ||
		    (vmiospace < maxvmiobufspace)) {
			--bp->b_usecount;
			TAILQ_REMOVE(&bufqueues[QUEUE_LRU], bp, b_freelist);
			if (TAILQ_FIRST(&bufqueues[QUEUE_LRU]) != NULL) {
				TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
				goto start;
			}
			TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], bp, b_freelist);
		}
	}

	/* if we are a delayed write, convert to an async write */
	if ((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) {
		nbyteswritten += vfs_bio_awrite(bp);
		if (!slpflag && !slptimeo) {
			return (0);
		}
		goto start;
	}

	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}
	bremfree(bp);
	bp->b_flags |= B_BUSY;

	if (bp->b_flags & B_VMIO)
		vfs_vmio_release(bp);

	if (bp->b_vp)
		brelvp(bp);

fillbuf:
	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	LIST_REMOVE(bp, b_hash);
	LIST_INSERT_HEAD(&invalhash, bp, b_hash);
	if (bp->b_bufsize) {
		allocbuf(bp, 0);
	}
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_npages = 0;
	bp->b_data = buffers_kva + (bp - buf) * MAXBSIZE;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;
	bp->b_usecount = 4;
	if (bufspace >= maxbufspace + nbyteswritten) {
		bp->b_flags |= B_INVAL;
		brelse(bp);
		goto trytofreespace;
	}
	return (bp);
}

/*
 * Check to see if a block is currently memory resident.
 */
struct buf *
incore(struct vnode * vp, daddr_t blkno)
{
	struct buf *bp;

	int s = splbio();
	bp = gbincore(vp, blkno);
	splx(s);
	return (bp);
}

/*
 * Returns true if no I/O is needed to access the
 * associated VM object.  This is like incore except
 * it also hunts around in the VM system for the data.
 */

int
inmem(struct vnode * vp, daddr_t blkno)
{
	vm_object_t obj;
	vm_offset_t toff, tinc;
	vm_page_t m;
	vm_ooffset_t off;

	if (incore(vp, blkno))
		return 1;
	if (vp->v_mount == NULL)
		return 0;
	if ((vp->v_object == NULL) || (vp->v_flag & VVMIO) == 0)
		return 0;

	obj = vp->v_object;
	tinc = PAGE_SIZE;
	if (tinc > vp->v_mount->mnt_stat.f_iosize)
		tinc = vp->v_mount->mnt_stat.f_iosize;
	off = blkno * vp->v_mount->mnt_stat.f_iosize;

	for (toff = 0; toff < vp->v_mount->mnt_stat.f_iosize; toff += tinc) {

		m = vm_page_lookup(obj, OFF_TO_IDX(off + toff));
		if (!m)
			return 0;
		if (vm_page_is_valid(m, (vm_offset_t) (toff + off), tinc) == 0)
			return 0;
	}
	return 1;
}

/*
 * now we set the dirty range for the buffer --
 * for NFS -- if the file is mapped and pages have
 * been written to, let it know.  We want the
 * entire range of the buffer to be marked dirty if
 * any of the pages have been written to for consistency
 * with the b_validoff, b_validend set in the nfs write
 * code, and used by the nfs read code.
 */
static void
vfs_setdirty(struct buf *bp)
{
	int i;
	vm_object_t object;
	vm_offset_t boffset, offset;

	/*
	 * We qualify the scan for modified pages on whether the
	 * object has been flushed yet.  The OBJ_WRITEABLE flag
	 * is not cleared simply by protecting pages off.
	 */
	if ((bp->b_flags & B_VMIO) &&
	    ((object = bp->b_pages[0]->object)->flags & (OBJ_WRITEABLE|OBJ_CLEANING))) {
		/*
		 * test the pages to see if they have been modified directly
		 * by users through the VM system.
		 */
		for (i = 0; i < bp->b_npages; i++)
			vm_page_test_dirty(bp->b_pages[i]);

		/*
		 * scan forwards for the first page modified
		 */
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i << PAGE_SHIFT);
		if (boffset < bp->b_dirtyoff) {
			bp->b_dirtyoff = boffset;
		}

		/*
		 * scan backwards for the last page modified
		 */
		for (i = bp->b_npages - 1; i >= 0; --i) {
			if (bp->b_pages[i]->dirty) {
				break;
			}
		}
		boffset = (i + 1);
		offset = boffset + bp->b_pages[0]->pindex;
		if (offset >= object->size)
			boffset = object->size - bp->b_pages[0]->pindex;
		if (bp->b_dirtyend < (boffset << PAGE_SHIFT))
			bp->b_dirtyend = (boffset << PAGE_SHIFT);
	}
}

/*
 * Get a block given a specified block and offset into a file/device.
 */
struct buf *
getblk(struct vnode * vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s;
	struct bufhashhdr *bh;

	s = splbio();
loop:
	if ((bp = gbincore(vp, blkno))) {
		if (bp->b_flags & B_BUSY) {
			bp->b_flags |= B_WANTED;
			if (bp->b_usecount < BUF_MAXUSE)
				++bp->b_usecount;
			if (!tsleep(bp,
			    (PRIBIO + 1) | slpflag, "getblk", slptimeo))
				goto loop;

			splx(s);
			return (struct buf *) NULL;
		}
		bp->b_flags |= B_BUSY | B_CACHE;
		bremfree(bp);

		/*
		 * check for size inconsistencies (note that they shouldn't
		 * happen but do when filesystems don't handle the size changes
		 * correctly.)  We are conservative on metadata and don't just
		 * extend the buffer but write and re-constitute it.
		 */

		if (bp->b_bcount != size) {
			if (bp->b_flags & B_VMIO) {
				allocbuf(bp, size);
			} else {
				bp->b_flags |= B_NOCACHE;
				VOP_BWRITE(bp);
				goto loop;
			}
		}

		if (bp->b_usecount < BUF_MAXUSE)
			++bp->b_usecount;
		splx(s);
		return (bp);
	} else {
		vm_object_t obj;
		int doingvmio;

		if ((obj = vp->v_object) && (vp->v_flag & VVMIO)) {
			doingvmio = 1;
		} else {
			doingvmio = 0;
		}
		if ((bp = getnewbuf(slpflag, slptimeo, doingvmio)) == 0) {
			if (slpflag || slptimeo) {
				splx(s);
				return NULL;
			}
			goto loop;
		}

		/*
		 * This code is used to make sure that a buffer is not
		 * created while the getnewbuf routine is blocked.
		 * Normally the vnode is locked so this isn't a problem.
		 * VBLK type I/O requests, however, don't lock the vnode.
		 */
		if (!VOP_ISLOCKED(vp) && gbincore(vp, blkno)) {
			bp->b_flags |= B_INVAL;
			brelse(bp);
			goto loop;
		}

		/*
		 * Insert the buffer into the hash, so that it can
		 * be found by incore.
		 */
		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		LIST_REMOVE(bp, b_hash);
		bh = BUFHASH(vp, blkno);
		LIST_INSERT_HEAD(bh, bp, b_hash);

		if (doingvmio) {
			bp->b_flags |= (B_VMIO | B_CACHE);
#if defined(VFS_BIO_DEBUG)
			if (vp->v_type != VREG && vp->v_type != VBLK)
				printf("getblk: vmioing file type %d???\n", vp->v_type);
#endif
		} else {
			bp->b_flags &= ~B_VMIO;
		}
		splx(s);

		allocbuf(bp, size);
#ifdef PC98
		/*
		 * 1024byte/sector support
		 */
#define B_XXX2 0x8000000
		if (vp->v_flag & 0x10000) bp->b_flags |= B_XXX2;
#endif
		return (bp);
	}
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;
	splx(s);
	allocbuf(bp, size);
	bp->b_flags |= B_INVAL;
	return (bp);
}


/*
 * This code constitutes the buffer memory from either anonymous system
 * memory (in the case of non-VMIO operations) or from an associated
 * VM object (in the case of VMIO operations).
 *
 * Note that this code is tricky, and has many complications to resolve
 * deadlock or inconsistent data situations.  Tread lightly!!!
 *
 * Modify the length of a buffer's underlying buffer storage without
 * destroying information (unless, of course, the buffer is shrinking).
 */
int
allocbuf(struct buf * bp, int size)
{

	int s;
	int newbsize, mbsize;
	int i;

	if (!(bp->b_flags & B_BUSY))
		panic("allocbuf: buffer not busy");

	if ((bp->b_flags & B_VMIO) == 0) {
		caddr_t origbuf;
		int origbufsize;

		/*
		 * Just get anonymous memory from the kernel
		 */
		mbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			newbsize = mbsize;
		else
#endif
			newbsize = round_page(size);

		if (newbsize < bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * malloced buffers are not shrunk
			 */
			if (bp->b_flags & B_MALLOC) {
				if (newbsize) {
					bp->b_bcount = size;
				} else {
					free(bp->b_data, M_BIOBUF);
					bufspace -= bp->b_bufsize;
					bufmallocspace -= bp->b_bufsize;
					bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
					bp->b_bufsize = 0;
					bp->b_bcount = 0;
					bp->b_flags &= ~B_MALLOC;
				}
				return 1;
			}
#endif
			vm_hold_free_pages(
			    bp,
			    (vm_offset_t) bp->b_data + newbsize,
			    (vm_offset_t) bp->b_data + bp->b_bufsize);
		} else if (newbsize > bp->b_bufsize) {
#if !defined(NO_B_MALLOC)
			/*
			 * We only use malloced memory on the first allocation,
			 * and revert to page-allocated memory when the buffer
			 * grows.
			 */
			if ((bufmallocspace < maxbufmallocspace) &&
			    (bp->b_bufsize == 0) &&
			    (mbsize <= PAGE_SIZE/2)) {

				bp->b_data = malloc(mbsize, M_BIOBUF, M_WAITOK);
				bp->b_bufsize = mbsize;
				bp->b_bcount = size;
				bp->b_flags |= B_MALLOC;
				bufspace += mbsize;
				bufmallocspace += mbsize;
				return 1;
			}
#endif
			origbuf = NULL;
			origbufsize = 0;
#if !defined(NO_B_MALLOC)
			/*
			 * If the buffer is growing on its other-than-first
			 * allocation, then we revert to the page-allocation
			 * scheme.
			 */
			if (bp->b_flags & B_MALLOC) {
				origbuf = bp->b_data;
				origbufsize = bp->b_bufsize;
				bp->b_data = (caddr_t) buffers_kva + (bp - buf) * MAXBSIZE;
				bufspace -= bp->b_bufsize;
				bufmallocspace -= bp->b_bufsize;
				bp->b_bufsize = 0;
				bp->b_flags &= ~B_MALLOC;
				newbsize = round_page(newbsize);
			}
#endif
			vm_hold_load_pages(
			    bp,
			    (vm_offset_t) bp->b_data + bp->b_bufsize,
			    (vm_offset_t) bp->b_data + newbsize);
#if !defined(NO_B_MALLOC)
			if (origbuf) {
				bcopy(origbuf, bp->b_data, origbufsize);
				free(origbuf, M_BIOBUF);
			}
#endif
		}
	} else {
		vm_page_t m;
		int desiredpages;

		newbsize = (size + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
		desiredpages = (round_page(newbsize) >> PAGE_SHIFT);

#if !defined(NO_B_MALLOC)
		if (bp->b_flags & B_MALLOC)
			panic("allocbuf: VMIO buffer can't be malloced");
#endif

		if (newbsize < bp->b_bufsize) {
			if (desiredpages < bp->b_npages) {
				for (i = desiredpages; i < bp->b_npages; i++) {
					/*
					 * the page is not freed here -- it
					 * is the responsibility of vnode_pager_setsize
					 */
					m = bp->b_pages[i];
					s = splhigh();
					while ((m->flags & PG_BUSY) || (m->busy != 0)) {
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "biodep", 0);
					}
					splx(s);

					bp->b_pages[i] = NULL;
					vm_page_unwire(m);
				}
				pmap_qremove((vm_offset_t) trunc_page(bp->b_data) +
				    (desiredpages << PAGE_SHIFT), (bp->b_npages - desiredpages));
				bp->b_npages = desiredpages;
			}
		} else if (newbsize > bp->b_bufsize) {
			vm_object_t obj;
			vm_offset_t tinc, toff;
			vm_ooffset_t off;
			vm_pindex_t objoff;
			int pageindex, curbpnpages;
			struct vnode *vp;
			int bsize;

			vp = bp->b_vp;

			if (vp->v_type == VBLK)
				bsize = DEV_BSIZE;
			else
				bsize = vp->v_mount->mnt_stat.f_iosize;

			if (bp->b_npages < desiredpages) {
				obj = vp->v_object;
				tinc = PAGE_SIZE;
				if (tinc > bsize)
					tinc = bsize;
				off = (vm_ooffset_t) bp->b_lblkno * bsize;
	doretry:
				curbpnpages = bp->b_npages;
				bp->b_flags |= B_CACHE;
				for (toff = 0; toff < newbsize; toff += tinc) {
					int bytesinpage;

					pageindex = toff >> PAGE_SHIFT;
					objoff = OFF_TO_IDX(off + toff);
					if (pageindex < curbpnpages) {

						m = bp->b_pages[pageindex];
#ifdef VFS_BIO_DIAG
						if (m->pindex != objoff)
							panic("allocbuf: page changed offset??!!!?");
#endif
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						continue;
					}
					m = vm_page_lookup(obj, objoff);
					if (!m) {
						m = vm_page_alloc(obj, objoff, VM_ALLOC_NORMAL);
						if (!m) {
							VM_WAIT;
							goto doretry;
						}
						/*
						 * Normally it is unwise to clear PG_BUSY without
						 * PAGE_WAKEUP -- but it is okay here, as there is
						 * no chance for blocking between here and vm_page_alloc
						 */
						m->flags &= ~PG_BUSY;
						vm_page_wire(m);
						bp->b_flags &= ~B_CACHE;
					} else if (m->flags & PG_BUSY) {

						s = splhigh();
						m->flags |= PG_WANTED;
						tsleep(m, PVM, "pgtblk", 0);
						splx(s);

						goto doretry;
					} else {
						if ((curproc != pageproc) &&
						    (m->queue == PQ_CACHE) &&
						    ((cnt.v_free_count + cnt.v_cache_count) <
						    (cnt.v_free_min + cnt.v_cache_min))) {
							pagedaemon_wakeup();
						}
						bytesinpage = tinc;
						if (tinc > (newbsize - toff))
							bytesinpage = newbsize - toff;
						if ((bp->b_flags & B_CACHE) &&
						    !vm_page_is_valid(m,
						    (vm_offset_t) ((toff + off) & PAGE_MASK),
						    bytesinpage)) {
							bp->b_flags &= ~B_CACHE;
						}
						vm_page_wire(m);
					}
					bp->b_pages[pageindex] = m;
					curbpnpages = pageindex + 1;
				}
				bp->b_data = (caddr_t) trunc_page(bp->b_data);
				bp->b_npages = curbpnpages;
				pmap_qenter((vm_offset_t) bp->b_data,
				    bp->b_pages, bp->b_npages);
				((vm_offset_t) bp->b_data) |= off & PAGE_MASK;
			}
		}
	}
	if (bp->b_flags & B_VMIO)
		vmiospace += bp->b_bufsize;
	bufspace += (newbsize - bp->b_bufsize);
	bp->b_bufsize = newbsize;
	bp->b_bcount = size;
	return 1;
}

/*
 * Wait for buffer I/O completion, returning error status.
 */
int
biowait(register struct buf * bp)
{
	int s;

	s = splbio();
	while ((bp->b_flags & B_DONE) == 0)
		tsleep(bp, PRIBIO, "biowait", 0);
	splx(s);
	if (bp->b_flags & B_EINTR) {
		bp->b_flags &= ~B_EINTR;
		return (EINTR);
	}
	if (bp->b_flags & B_ERROR) {
		return (bp->b_error ? bp->b_error : EIO);
	} else {
		return (0);
	}
}

/*
 * Finish I/O on a buffer, calling an optional function.
 * This is usually called from interrupt level, so process blocking
 * is not *a good idea*.
 */
void
biodone(register struct buf * bp)
{
	int s;

	s = splbio();
	if (!(bp->b_flags & B_BUSY))
		panic("biodone: buffer not busy");

	if (bp->b_flags & B_DONE) {
		splx(s);
		printf("biodone: buffer already done\n");
		return;
	}
	bp->b_flags |= B_DONE;

	if ((bp->b_flags & B_READ) == 0) {
		vwakeup(bp);
	}
#ifdef BOUNCE_BUFFERS
	if (bp->b_flags & B_BOUNCE)
		vm_bounce_free(bp);
#endif

	/* call optional completion function if requested */
	if (bp->b_flags & B_CALL) {
		bp->b_flags &= ~B_CALL;
		(*bp->b_iodone) (bp);
		splx(s);
		return;
	}
	if (bp->b_flags & B_VMIO) {
		int i, resid;
		vm_ooffset_t foff;
		vm_page_t m;
		vm_object_t obj;
		int iosize;
		struct vnode *vp = bp->b_vp;

		if (vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		obj = vp->v_object;
		if (!obj) {
			panic("biodone: no object");
		}
#if defined(VFS_BIO_DEBUG)
		if (obj->paging_in_progress < bp->b_npages) {
			printf("biodone: paging in progress(%d) < bp->b_npages(%d)\n",
			    obj->paging_in_progress, bp->b_npages);
		}
#endif
		iosize = bp->b_bufsize;
		for (i = 0; i < bp->b_npages; i++) {
			int bogusflag = 0;
			m = bp->b_pages[i];
			if (m == bogus_page) {
				bogusflag = 1;
				m = vm_page_lookup(obj, OFF_TO_IDX(foff));
				if (!m) {
#if defined(VFS_BIO_DEBUG)
					printf("biodone: page disappeared\n");
#endif
					--obj->paging_in_progress;
					continue;
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
#if defined(VFS_BIO_DEBUG)
			if (OFF_TO_IDX(foff) != m->pindex) {
				printf("biodone: foff(%d)/m->pindex(%d) mismatch\n", foff, m->pindex);
			}
#endif
			resid = IDX_TO_OFF(m->pindex + 1) - foff;
			if (resid > iosize)
				resid = iosize;

			/*
			 * In the write case, the valid and clean bits are
			 * already changed correctly, so we only need to do this
			 * here in the read case.
			 */
			if ((bp->b_flags & B_READ) && !bogusflag && resid > 0) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			}

			/*
			 * when debugging new filesystems or buffer I/O methods, this
			 * is the most common error that pops up.  if you see this, you
			 * have not set the page busy flag correctly!!!
			 */
			if (m->busy == 0) {
				printf("biodone: page busy < 0, "
				    "pindex: %d, foff: 0x(%x,%x), "
				    "resid: %d, index: %d\n",
				    (int) m->pindex, (int)(foff >> 32),
				    (int) foff & 0xffffffff, resid, i);
				if (vp->v_type != VBLK)
					printf(" iosize: %ld, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    bp->b_vp->v_mount->mnt_stat.f_iosize,
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				else
					printf(" VDEV, lblkno: %d, flags: 0x%lx, npages: %d\n",
					    (int) bp->b_lblkno,
					    bp->b_flags, bp->b_npages);
				printf(" valid: 0x%x, dirty: 0x%x, wired: %d\n",
				    m->valid, m->dirty, m->wire_count);
				panic("biodone: page busy < 0");
			}
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
			--obj->paging_in_progress;
			foff += resid;
			iosize -= resid;
		}
		if (obj && obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
	/*
	 * For asynchronous completions, release the buffer now.  The brelse
	 * checks for B_WANTED and will do the wakeup there if necessary - so
	 * no need to do a wakeup here in the async case.
	 */

	if (bp->b_flags & B_ASYNC) {
		if ((bp->b_flags & (B_NOCACHE | B_INVAL | B_ERROR | B_RELBUF)) != 0)
			brelse(bp);
		else
			bqrelse(bp);
	} else {
		wakeup(bp);
	}
	splx(s);
}
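
/*
 * Return the number of buffers currently on the locked queue.
 */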
int
count_lock_queue()
{
	int count;
	struct buf *bp;

	count = 0;
	for (bp = TAILQ_FIRST(&bufqueues[QUEUE_LOCKED]);
	    bp != NULL;
	    bp = TAILQ_NEXT(bp, b_freelist))
		count++;
	return (count);
}

int vfs_update_interval = 30;

static void
vfs_update()
{
	(void) spl0();		/* XXX redundant? wrong place? */
	while (1) {
		tsleep(&vfs_update_wakeup, PUSER, "update",
		    hz * vfs_update_interval);
		vfs_update_wakeup = 0;
		sync(curproc, NULL, NULL);
	}
}

/*
 * Handler for the kern.update sysctl: store the new interval and poke
 * the update daemon so that the new value takes effect immediately.
 */
static int
sysctl_kern_updateinterval SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp,
	    oidp->oid_arg1, oidp->oid_arg2, req);
	if (!error)
		wakeup(&vfs_update_wakeup);
	return error;
}

SYSCTL_PROC(_kern, KERN_UPDATEINTERVAL, update, CTLTYPE_INT|CTLFLAG_RW,
	&vfs_update_interval, 0, sysctl_kern_updateinterval, "I", "");

/*
 * This routine is called in lieu of iodone in the case of
 * incomplete I/O.  This keeps the busy status for pages
 * consistent.
 */
void
vfs_unbusy_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		struct vnode *vp = bp->b_vp;
		vm_object_t obj = vp->v_object;
		vm_ooffset_t foff;

		foff = (vm_ooffset_t) vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];

			if (m == bogus_page) {
				m = vm_page_lookup(obj, OFF_TO_IDX(foff) + i);
				if (!m) {
					panic("vfs_unbusy_pages: page missing");
				}
				bp->b_pages[i] = m;
				pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
			}
			--obj->paging_in_progress;
			--m->busy;
			if ((m->busy == 0) && (m->flags & PG_WANTED)) {
				m->flags &= ~PG_WANTED;
				wakeup(m);
			}
		}
		if (obj->paging_in_progress == 0 &&
		    (obj->flags & OBJ_PIPWNT)) {
			obj->flags &= ~OBJ_PIPWNT;
			wakeup(obj);
		}
	}
}

/*
 * This routine is called before a device strategy routine.
 * It is used to tell the VM system that paging I/O is in
 * progress, and treat the pages associated with the buffer
 * almost as being PG_BUSY.  Also the object paging_in_progress
 * flag is handled to make sure that the object doesn't become
 * inconsistent.
 */
void
vfs_busy_pages(struct buf * bp, int clear_modify)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_object_t obj = bp->b_vp->v_object;
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;
		vfs_setdirty(bp);
		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if ((bp->b_flags & B_CLUSTER) == 0) {
				obj->paging_in_progress++;
				m->busy++;
			}
			vm_page_protect(m, VM_PROT_NONE);
			if (clear_modify) {
				vm_page_set_validclean(m,
				    (vm_offset_t) (foff & PAGE_MASK), resid);
			} else if (bp->b_bcount >= PAGE_SIZE) {
				if (m->valid && (bp->b_flags & B_CACHE) == 0) {
					bp->b_pages[i] = bogus_page;
					pmap_qenter(trunc_page(bp->b_data), bp->b_pages, bp->b_npages);
				}
			}
			foff += resid;
			iocount -= resid;
		}
	}
}
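
/*
 * Illustrative pairing (a sketch of the pattern used by bread/bwrite
 * above, not part of the original source): a caller marks the pages
 * before handing the buffer to the driver, and biodone() -- or
 * vfs_unbusy_pages() on an aborted transfer -- undoes the accounting:
 *
 *	vfs_busy_pages(bp, 0);
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 */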

/*
 * Tell the VM system that the pages associated with this buffer
 * are clean.  This is used for delayed writes where the data is
 * going to go to disk eventually without additional VM intervention.
 */
void
vfs_clean_pages(struct buf * bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		vm_ooffset_t foff;
		int iocount = bp->b_bufsize;

		if (bp->b_vp->v_type == VBLK)
			foff = (vm_ooffset_t) DEV_BSIZE * bp->b_lblkno;
		else
			foff = (vm_ooffset_t) bp->b_vp->v_mount->mnt_stat.f_iosize * bp->b_lblkno;

		for (i = 0; i < bp->b_npages; i++) {
			vm_page_t m = bp->b_pages[i];
			int resid = IDX_TO_OFF(m->pindex + 1) - foff;

			if (resid > iocount)
				resid = iocount;
			if (resid > 0) {
				vm_page_set_validclean(m,
				    ((vm_offset_t) foff & PAGE_MASK), resid);
			}
			foff += resid;
			iocount -= resid;
		}
	}
}

/*
 * Ensure the contents of a buffer are zeroed where the backing pages
 * are not already valid.  Non-VMIO buffers are simply cleared.
 */
void
vfs_bio_clrbuf(struct buf *bp)
{
	int i;

	if (bp->b_flags & B_VMIO) {
		if ((bp->b_npages == 1) && (bp->b_bufsize < PAGE_SIZE)) {
			int mask;

			mask = 0;
			for (i = 0; i < bp->b_bufsize; i += DEV_BSIZE)
				mask |= (1 << (i / DEV_BSIZE));
			if (bp->b_pages[0]->valid != mask) {
				bzero(bp->b_data, bp->b_bufsize);
			}
			bp->b_pages[0]->valid = mask;
			bp->b_resid = 0;
			return;
		}
		for (i = 0; i < bp->b_npages; i++) {
			if (bp->b_pages[i]->valid == VM_PAGE_BITS_ALL)
				continue;
			if (bp->b_pages[i]->valid == 0) {
				if ((bp->b_pages[i]->flags & PG_ZERO) == 0) {
					bzero(bp->b_data + (i << PAGE_SHIFT), PAGE_SIZE);
				}
			} else {
				int j;

				for (j = 0; j < PAGE_SIZE / DEV_BSIZE; j++) {
					if ((bp->b_pages[i]->valid & (1 << j)) == 0)
						bzero(bp->b_data + (i << PAGE_SHIFT) + j * DEV_BSIZE, DEV_BSIZE);
				}
			}
			/* bp->b_pages[i]->valid = VM_PAGE_BITS_ALL; */
		}
		bp->b_resid = 0;
	} else {
		clrbuf(bp);
	}
}

/*
 * vm_hold_load_pages and vm_hold_free_pages get pages into (and out of)
 * a buffer's address space.  The pages are anonymous and are
 * not associated with a file object.
 */
void
vm_hold_load_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	to = round_page(to);
	from = round_page(from);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {

tryagain:

		p = vm_page_alloc(kernel_object, ((pg - VM_MIN_KERNEL_ADDRESS) >> PAGE_SHIFT),
		    VM_ALLOC_NORMAL);
		if (!p) {
			VM_WAIT;
			goto tryagain;
		}
		vm_page_wire(p);
		pmap_kenter(pg, VM_PAGE_TO_PHYS(p));
		bp->b_pages[index] = p;
		PAGE_WAKEUP(p);
	}
	bp->b_npages = to >> PAGE_SHIFT;
}

void
vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
{
	vm_offset_t pg;
	vm_page_t p;
	int index;

	from = round_page(from);
	to = round_page(to);
	index = (from - trunc_page(bp->b_data)) >> PAGE_SHIFT;

	for (pg = from; pg < to; pg += PAGE_SIZE, index++) {
		p = bp->b_pages[index];
		if (p && (index < bp->b_npages)) {
			if (p->busy) {
				printf("vm_hold_free_pages: blkno: %d, lblkno: %d\n",
				    bp->b_blkno, bp->b_lblkno);
			}
			bp->b_pages[index] = NULL;
			pmap_kremove(pg);
			vm_page_unwire(p);
			vm_page_free(p);
		}
	}
	bp->b_npages = from >> PAGE_SHIFT;
}