vfs_cluster.c revision 112175
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: head/sys/kern/vfs_cluster.c 112175 2003-03-13 06:17:59Z jeff $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/stdint.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t	bogus_page;

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

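/*
 * Illustrative usage (editor's sketch, not part of this revision): a
 * filesystem read path would typically prefer cluster_read() below over
 * plain bread() when the access pattern looks sequential.  The names ip,
 * lbn, bsize and seqcount are hypothetical caller-side variables, loosely
 * modeled on an FFS-style read routine.
 *
 *	if (seqcount > 1)
 *		error = cluster_read(vp, ip->i_size, lbn, bsize, NOCRED,
 *		    uio->uio_resid, seqcount, &bp);
 *	else
 *		error = bread(vp, lbn, bsize, NOCRED, &bp);
 */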
/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

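	/*
	 * Worked example (editor's illustration with assumed numbers): with
	 * an 8K block size and mnt_iosize_max of 64K, racluster is 8, so a
	 * single cluster I/O spans at most 8 blocks.  If the caller reports
	 * seqcount of 16 and read_max is at its default of 8 (and nbuf/8 is
	 * larger than that), maxra is clamped to 8 blocks of read-ahead,
	 * further reduced if that window would run past the end of the file.
	 */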
	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore and the b_usecount increment
			 * below.  We opt to keep the spl out of the loop
			 * for efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(vp, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
		if (error)
			return (error);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
			lblkno += 1;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		(void) VOP_STRATEGY(vp, rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

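/*
 * Worked example of the B_RAM (read-ahead mark) handshake (editor's
 * illustration with assumed numbers): a first cache miss on block 0 with a
 * sequential history might build an 8-block cluster covering blocks 0-7;
 * cluster_rbuild() marks the last block of that run with B_RAM.  When the
 * application later finds block 7 already cached, the B_CACHE path above
 * sees B_RAM, clears it, and falls through to the read-ahead loop, which
 * issues the next cluster (blocks 8-15) asynchronously before the data is
 * actually needed.
 */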
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != filesize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == 0)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid)
					break;
			}
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		vm_page_lock_queues();
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		vm_page_unlock_queues();
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return(r);
}

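/*
 * Worked example (editor's illustration with assumed numbers): with
 * write_behind set to 2, a call of cluster_wbuild_wb(vp, size, 64, 16)
 * first backs the window off by len, so the cluster actually pushed covers
 * logical blocks 48-63.  With the default of 1 the same call pushes blocks
 * 64-79, and with 0 nothing is written and 0 is returned.
 */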
/*
 * Do clustered write for FFS.
 *
 * Three cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


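/*
 * Illustrative usage (editor's sketch, not part of this revision): besides
 * cluster_wbuild_wb() above, a typical external caller of cluster_wbuild()
 * is an async-write path that has found a run of adjacent dirty buffers and
 * wants them pushed as one I/O.  The variables below (lblkno, j, ncl) are
 * hypothetical: j clusterable dirty blocks precede lblkno and the whole run
 * is ncl blocks long.
 *
 *	nwritten = cluster_wbuild(vp, size, lblkno - j, ncl);
 */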
/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(vp, start_lbn)) == NULL) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
		    B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in.
		 * so initialise it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL)
				buf_start(tbp);
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				vm_page_lock_queues();
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				vm_page_unlock_queues();
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			VI_LOCK(tbp->b_vp);
			++tbp->b_vp->v_numoutput;
			VI_UNLOCK(tbp->b_vp);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

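/*
 * Layout note (editor's illustration): cluster_collectbufs() below makes a
 * single allocation that holds the cluster_save header followed immediately
 * by the bs_children pointer array, so bs_children simply points just past
 * the header:
 *
 *	+---------------------+-------------------------------------+
 *	| struct cluster_save | struct buf *bs_children[len + 1]    |
 *	+---------------------+-------------------------------------+
 *	^ buflist              ^ (struct buf **)(buflist + 1)
 */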
/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}