vfs_cluster.c revision 117879
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_cluster.c 117879 2003-07-22 10:36:36Z phk $");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t	bogus_page;
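/*
 * bogus_page stands in for pages that are already fully valid when a
 * cluster read is built, giving the device transfer a throw-away target
 * so that good data is not overwritten; see cluster_rbuild() below.
 */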
/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore and the b_usecount increment
			 * below.  We opt to keep the spl out of the loop
			 * for efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(vp, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
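			/*
			 * VOP_BMAP reported at least one block beyond the
			 * requested one contiguous on disk, so read the
			 * whole run as one cluster via cluster_rbuild().
			 */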
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
				blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
		if (error)
			return (error);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
				size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		(void) VOP_STRATEGY(vp, rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != filesize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == 0)
		return tbp;

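	/*
	 * The pbuf becomes the head of the cluster: it carries the combined
	 * page list and the single I/O request, while the component buffers
	 * collected below are completed individually in cluster_callback().
	 */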
	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid)
					break;
			}
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			tbp, b_cluster.cluster_entry);
		if (tbp->b_object != NULL)
			VM_OBJECT_LOCK(tbp->b_object);
		vm_page_lock_queues();
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		vm_page_unlock_queues();
		if (tbp->b_object != NULL)
			VM_OBJECT_UNLOCK(tbp->b_object);
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	     tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch(write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return(r);
}
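
/*
 * With write_behind == 2 (backed off), the request above is shifted back
 * by one window of 'len' blocks, so the cluster that has just filled is
 * left dirty until the next window completes (or the buf/update daemon
 * flushes it).
 */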

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
						vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(vp, start_lbn)) == NULL) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
		    B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in, so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
				(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				     tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				     (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL) {
				tbp->b_iocmd = BIO_WRITE;
				buf_start(tbp);
			}
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				if (tbp->b_object != NULL)
					VM_OBJECT_LOCK(tbp->b_object);
				vm_page_lock_queues();
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				vm_page_unlock_queues();
				if (tbp->b_object != NULL)
					VM_OBJECT_UNLOCK(tbp->b_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			VI_LOCK(tbp->b_vp);
			++tbp->b_vp->v_numoutput;
			VI_UNLOCK(tbp->b_vp);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
				tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			(vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
				NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}