vfs_cluster.c revision 121269
1/*- 2 * Copyright (c) 1993 3 * The Regents of the University of California. All rights reserved. 4 * Modifications/enhancements: 5 * Copyright (c) 1995 John S. Dyson. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94 36 */ 37 38#include <sys/cdefs.h> 39__FBSDID("$FreeBSD: head/sys/kern/vfs_cluster.c 121269 2003-10-20 05:57:55Z alc $"); 40 41#include "opt_debug_cluster.h" 42 43#include <sys/param.h> 44#include <sys/systm.h> 45#include <sys/kernel.h> 46#include <sys/proc.h> 47#include <sys/bio.h> 48#include <sys/buf.h> 49#include <sys/vnode.h> 50#include <sys/malloc.h> 51#include <sys/mount.h> 52#include <sys/resourcevar.h> 53#include <sys/vmmeter.h> 54#include <vm/vm.h> 55#include <vm/vm_object.h> 56#include <vm/vm_page.h> 57#include <sys/sysctl.h> 58 59#if defined(CLUSTERDEBUG) 60static int rcluster= 0; 61SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, 62 "Debug VFS clustering code"); 63#endif 64 65static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer"); 66 67static struct cluster_save * 68 cluster_collectbufs(struct vnode *vp, struct buf *last_bp); 69static struct buf * 70 cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn, 71 daddr_t blkno, long size, int run, struct buf *fbp); 72 73static int write_behind = 1; 74SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, 75 "Cluster write-behind; 0: disable, 1: enable, 2: backed off"); 76 77static int read_max = 8; 78SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0, 79 "Cluster read-ahead max block count"); 80 81/* Page expended to mark partially backed buffers */ 82extern vm_page_t bogus_page; 83 84/* 85 * Number of physical bufs (pbufs) this subsystem is allowed. 86 * Manipulated by vm_pager.c 87 */ 88extern int cluster_pbuf_freecnt; 89 90/* 91 * Read data to a buf, including read-ahead if we find this to be beneficial. 92 * cluster_read replaces bread. 93 */ 94int 95cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp) 96 struct vnode *vp; 97 u_quad_t filesize; 98 daddr_t lblkno; 99 long size; 100 struct ucred *cred; 101 long totread; 102 int seqcount; 103 struct buf **bpp; 104{ 105 struct buf *bp, *rbp, *reqbp; 106 daddr_t blkno, origblkno; 107 int maxra, racluster; 108 int error, ncontig; 109 int i; 110 111 error = 0; 112 113 /* 114 * Try to limit the amount of read-ahead by a few 115 * ad-hoc parameters. This needs work!!! 116 */ 117 racluster = vp->v_mount->mnt_iosize_max / size; 118 maxra = seqcount; 119 maxra = min(read_max, maxra); 120 maxra = min(nbuf/8, maxra); 121 if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize) 122 maxra = (filesize / size) - lblkno; 123 124 /* 125 * get the requested block 126 */ 127 *bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0); 128 origblkno = lblkno; 129 130 /* 131 * if it is in the cache, then check to see if the reads have been 132 * sequential. If they have, then try some read-ahead, otherwise 133 * back-off on prospective read-aheads. 134 */ 135 if (bp->b_flags & B_CACHE) { 136 if (!seqcount) { 137 return 0; 138 } else if ((bp->b_flags & B_RAM) == 0) { 139 return 0; 140 } else { 141 int s; 142 bp->b_flags &= ~B_RAM; 143 /* 144 * We do the spl here so that there is no window 145 * between the incore and the b_usecount increment 146 * below. We opt to keep the spl out of the loop 147 * for efficiency. 148 */ 149 s = splbio(); 150 VI_LOCK(vp); 151 for (i = 1; i < maxra; i++) { 152 /* 153 * Stop if the buffer does not exist or it 154 * is invalid (about to go away?) 155 */ 156 rbp = gbincore(vp, lblkno+i); 157 if (rbp == NULL || (rbp->b_flags & B_INVAL)) 158 break; 159 160 /* 161 * Set another read-ahead mark so we know 162 * to check again. 163 */ 164 if (((i % racluster) == (racluster - 1)) || 165 (i == (maxra - 1))) 166 rbp->b_flags |= B_RAM; 167 } 168 VI_UNLOCK(vp); 169 splx(s); 170 if (i >= maxra) { 171 return 0; 172 } 173 lblkno += i; 174 } 175 reqbp = bp = NULL; 176 /* 177 * If it isn't in the cache, then get a chunk from 178 * disk if sequential, otherwise just get the block. 179 */ 180 } else { 181 off_t firstread = bp->b_offset; 182 int nblks; 183 184 KASSERT(bp->b_offset != NOOFFSET, 185 ("cluster_read: no buffer offset")); 186 187 ncontig = 0; 188 189 /* 190 * Compute the total number of blocks that we should read 191 * synchronously. 192 */ 193 if (firstread + totread > filesize) 194 totread = filesize - firstread; 195 nblks = howmany(totread, size); 196 if (nblks > racluster) 197 nblks = racluster; 198 199 /* 200 * Now compute the number of contiguous blocks. 201 */ 202 if (nblks > 1) { 203 error = VOP_BMAP(vp, lblkno, NULL, 204 &blkno, &ncontig, NULL); 205 /* 206 * If this failed to map just do the original block. 207 */ 208 if (error || blkno == -1) 209 ncontig = 0; 210 } 211 212 /* 213 * If we have contiguous data available do a cluster 214 * otherwise just read the requested block. 215 */ 216 if (ncontig) { 217 /* Account for our first block. */ 218 ncontig = min(ncontig + 1, nblks); 219 if (ncontig < nblks) 220 nblks = ncontig; 221 bp = cluster_rbuild(vp, filesize, lblkno, 222 blkno, size, nblks, bp); 223 lblkno += (bp->b_bufsize / size); 224 } else { 225 bp->b_flags |= B_RAM; 226 bp->b_iocmd = BIO_READ; 227 lblkno += 1; 228 } 229 } 230 231 /* 232 * handle the synchronous read so that it is available ASAP. 233 */ 234 if (bp) { 235 if ((bp->b_flags & B_CLUSTER) == 0) { 236 vfs_busy_pages(bp, 0); 237 } 238 bp->b_flags &= ~B_INVAL; 239 bp->b_ioflags &= ~BIO_ERROR; 240 if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL) 241 BUF_KERNPROC(bp); 242 bp->b_iooffset = dbtob(bp->b_blkno); 243 error = VOP_STRATEGY(vp, bp); 244 curproc->p_stats->p_ru.ru_inblock++; 245 if (error) 246 return (error); 247 } 248 249 /* 250 * If we have been doing sequential I/O, then do some read-ahead. 251 */ 252 while (lblkno < (origblkno + maxra)) { 253 error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL); 254 if (error) 255 break; 256 257 if (blkno == -1) 258 break; 259 260 /* 261 * We could throttle ncontig here by maxra but we might as 262 * well read the data if it is contiguous. We're throttled 263 * by racluster anyway. 264 */ 265 if (ncontig) { 266 ncontig = min(ncontig + 1, racluster); 267 rbp = cluster_rbuild(vp, filesize, lblkno, blkno, 268 size, ncontig, NULL); 269 lblkno += (rbp->b_bufsize / size); 270 if (rbp->b_flags & B_DELWRI) { 271 bqrelse(rbp); 272 continue; 273 } 274 } else { 275 rbp = getblk(vp, lblkno, size, 0, 0, 0); 276 lblkno += 1; 277 if (rbp->b_flags & B_DELWRI) { 278 bqrelse(rbp); 279 continue; 280 } 281 rbp->b_flags |= B_ASYNC | B_RAM; 282 rbp->b_iocmd = BIO_READ; 283 rbp->b_blkno = blkno; 284 } 285 if (rbp->b_flags & B_CACHE) { 286 rbp->b_flags &= ~B_ASYNC; 287 bqrelse(rbp); 288 continue; 289 } 290 if ((rbp->b_flags & B_CLUSTER) == 0) { 291 vfs_busy_pages(rbp, 0); 292 } 293 rbp->b_flags &= ~B_INVAL; 294 rbp->b_ioflags &= ~BIO_ERROR; 295 if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL) 296 BUF_KERNPROC(rbp); 297 rbp->b_iooffset = dbtob(rbp->b_blkno); 298 (void) VOP_STRATEGY(vp, rbp); 299 curproc->p_stats->p_ru.ru_inblock++; 300 } 301 302 if (reqbp) 303 return (bufwait(reqbp)); 304 else 305 return (error); 306} 307 308/* 309 * If blocks are contiguous on disk, use this to provide clustered 310 * read ahead. We will read as many blocks as possible sequentially 311 * and then parcel them up into logical blocks in the buffer hash table. 312 */ 313static struct buf * 314cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp) 315 struct vnode *vp; 316 u_quad_t filesize; 317 daddr_t lbn; 318 daddr_t blkno; 319 long size; 320 int run; 321 struct buf *fbp; 322{ 323 struct buf *bp, *tbp; 324 daddr_t bn; 325 int i, inc, j; 326 327 GIANT_REQUIRED; 328 329 KASSERT(size == vp->v_mount->mnt_stat.f_iosize, 330 ("cluster_rbuild: size %ld != filesize %ld\n", 331 size, vp->v_mount->mnt_stat.f_iosize)); 332 333 /* 334 * avoid a division 335 */ 336 while ((u_quad_t) size * (lbn + run) > filesize) { 337 --run; 338 } 339 340 if (fbp) { 341 tbp = fbp; 342 tbp->b_iocmd = BIO_READ; 343 } else { 344 tbp = getblk(vp, lbn, size, 0, 0, 0); 345 if (tbp->b_flags & B_CACHE) 346 return tbp; 347 tbp->b_flags |= B_ASYNC | B_RAM; 348 tbp->b_iocmd = BIO_READ; 349 } 350 351 tbp->b_blkno = blkno; 352 if( (tbp->b_flags & B_MALLOC) || 353 ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) ) 354 return tbp; 355 356 bp = trypbuf(&cluster_pbuf_freecnt); 357 if (bp == 0) 358 return tbp; 359 360 /* 361 * We are synthesizing a buffer out of vm_page_t's, but 362 * if the block size is not page aligned then the starting 363 * address may not be either. Inherit the b_data offset 364 * from the original buffer. 365 */ 366 bp->b_data = (char *)((vm_offset_t)bp->b_data | 367 ((vm_offset_t)tbp->b_data & PAGE_MASK)); 368 bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO; 369 bp->b_iocmd = BIO_READ; 370 bp->b_iodone = cluster_callback; 371 bp->b_blkno = blkno; 372 bp->b_lblkno = lbn; 373 bp->b_offset = tbp->b_offset; 374 KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset")); 375 pbgetvp(vp, bp); 376 bp->b_object = tbp->b_object; 377 378 TAILQ_INIT(&bp->b_cluster.cluster_head); 379 380 bp->b_bcount = 0; 381 bp->b_bufsize = 0; 382 bp->b_npages = 0; 383 384 inc = btodb(size); 385 for (bn = blkno, i = 0; i < run; ++i, bn += inc) { 386 if (i != 0) { 387 if ((bp->b_npages * PAGE_SIZE) + 388 round_page(size) > vp->v_mount->mnt_iosize_max) { 389 break; 390 } 391 392 tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT); 393 394 /* Don't wait around for locked bufs. */ 395 if (tbp == NULL) 396 break; 397 398 /* 399 * Stop scanning if the buffer is fully valid 400 * (marked B_CACHE), or locked (may be doing a 401 * background write), or if the buffer is not 402 * VMIO backed. The clustering code can only deal 403 * with VMIO-backed buffers. 404 */ 405 VI_LOCK(bp->b_vp); 406 if ((tbp->b_vflags & BV_BKGRDINPROG) || 407 (tbp->b_flags & B_CACHE) || 408 (tbp->b_flags & B_VMIO) == 0) { 409 VI_UNLOCK(bp->b_vp); 410 bqrelse(tbp); 411 break; 412 } 413 VI_UNLOCK(bp->b_vp); 414 415 /* 416 * The buffer must be completely invalid in order to 417 * take part in the cluster. If it is partially valid 418 * then we stop. 419 */ 420 VM_OBJECT_LOCK(tbp->b_object); 421 for (j = 0;j < tbp->b_npages; j++) { 422 VM_OBJECT_LOCK_ASSERT(tbp->b_pages[j]->object, 423 MA_OWNED); 424 if (tbp->b_pages[j]->valid) 425 break; 426 } 427 VM_OBJECT_UNLOCK(tbp->b_object); 428 if (j != tbp->b_npages) { 429 bqrelse(tbp); 430 break; 431 } 432 433 /* 434 * Set a read-ahead mark as appropriate 435 */ 436 if ((fbp && (i == 1)) || (i == (run - 1))) 437 tbp->b_flags |= B_RAM; 438 439 /* 440 * Set the buffer up for an async read (XXX should 441 * we do this only if we do not wind up brelse()ing?). 442 * Set the block number if it isn't set, otherwise 443 * if it is make sure it matches the block number we 444 * expect. 445 */ 446 tbp->b_flags |= B_ASYNC; 447 tbp->b_iocmd = BIO_READ; 448 if (tbp->b_blkno == tbp->b_lblkno) { 449 tbp->b_blkno = bn; 450 } else if (tbp->b_blkno != bn) { 451 brelse(tbp); 452 break; 453 } 454 } 455 /* 456 * XXX fbp from caller may not be B_ASYNC, but we are going 457 * to biodone() it in cluster_callback() anyway 458 */ 459 BUF_KERNPROC(tbp); 460 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, 461 tbp, b_cluster.cluster_entry); 462 VM_OBJECT_LOCK(tbp->b_object); 463 vm_page_lock_queues(); 464 for (j = 0; j < tbp->b_npages; j += 1) { 465 vm_page_t m; 466 m = tbp->b_pages[j]; 467 vm_page_io_start(m); 468 vm_object_pip_add(m->object, 1); 469 if ((bp->b_npages == 0) || 470 (bp->b_pages[bp->b_npages-1] != m)) { 471 bp->b_pages[bp->b_npages] = m; 472 bp->b_npages++; 473 } 474 if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) 475 tbp->b_pages[j] = bogus_page; 476 } 477 vm_page_unlock_queues(); 478 VM_OBJECT_UNLOCK(tbp->b_object); 479 /* 480 * XXX shouldn't this be += size for both, like in 481 * cluster_wbuild()? 482 * 483 * Don't inherit tbp->b_bufsize as it may be larger due to 484 * a non-page-aligned size. Instead just aggregate using 485 * 'size'. 486 */ 487 if (tbp->b_bcount != size) 488 printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size); 489 if (tbp->b_bufsize != size) 490 printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size); 491 bp->b_bcount += size; 492 bp->b_bufsize += size; 493 } 494 495 /* 496 * Fully valid pages in the cluster are already good and do not need 497 * to be re-read from disk. Replace the page with bogus_page 498 */ 499 VM_OBJECT_LOCK(bp->b_object); 500 for (j = 0; j < bp->b_npages; j++) { 501 VM_OBJECT_LOCK_ASSERT(bp->b_pages[j]->object, MA_OWNED); 502 if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) == 503 VM_PAGE_BITS_ALL) { 504 bp->b_pages[j] = bogus_page; 505 } 506 } 507 VM_OBJECT_UNLOCK(bp->b_object); 508 if (bp->b_bufsize > bp->b_kvasize) 509 panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n", 510 bp->b_bufsize, bp->b_kvasize); 511 bp->b_kvasize = bp->b_bufsize; 512 513 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), 514 (vm_page_t *)bp->b_pages, bp->b_npages); 515 return (bp); 516} 517 518/* 519 * Cleanup after a clustered read or write. 520 * This is complicated by the fact that any of the buffers might have 521 * extra memory (if there were no empty buffer headers at allocbuf time) 522 * that we will need to shift around. 523 */ 524void 525cluster_callback(bp) 526 struct buf *bp; 527{ 528 struct buf *nbp, *tbp; 529 int error = 0; 530 531 GIANT_REQUIRED; 532 533 /* 534 * Must propogate errors to all the components. 535 */ 536 if (bp->b_ioflags & BIO_ERROR) 537 error = bp->b_error; 538 539 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 540 /* 541 * Move memory from the large cluster buffer into the component 542 * buffers and mark IO as done on these. 543 */ 544 for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head); 545 tbp; tbp = nbp) { 546 nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry); 547 if (error) { 548 tbp->b_ioflags |= BIO_ERROR; 549 tbp->b_error = error; 550 } else { 551 tbp->b_dirtyoff = tbp->b_dirtyend = 0; 552 tbp->b_flags &= ~B_INVAL; 553 tbp->b_ioflags &= ~BIO_ERROR; 554 /* 555 * XXX the bdwrite()/bqrelse() issued during 556 * cluster building clears B_RELBUF (see bqrelse() 557 * comment). If direct I/O was specified, we have 558 * to restore it here to allow the buffer and VM 559 * to be freed. 560 */ 561 if (tbp->b_flags & B_DIRECT) 562 tbp->b_flags |= B_RELBUF; 563 } 564 bufdone(tbp); 565 } 566 relpbuf(bp, &cluster_pbuf_freecnt); 567} 568 569/* 570 * cluster_wbuild_wb: 571 * 572 * Implement modified write build for cluster. 573 * 574 * write_behind = 0 write behind disabled 575 * write_behind = 1 write behind normal (default) 576 * write_behind = 2 write behind backed-off 577 */ 578 579static __inline int 580cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len) 581{ 582 int r = 0; 583 584 switch(write_behind) { 585 case 2: 586 if (start_lbn < len) 587 break; 588 start_lbn -= len; 589 /* FALLTHROUGH */ 590 case 1: 591 r = cluster_wbuild(vp, size, start_lbn, len); 592 /* FALLTHROUGH */ 593 default: 594 /* FALLTHROUGH */ 595 break; 596 } 597 return(r); 598} 599 600/* 601 * Do clustered write for FFS. 602 * 603 * Three cases: 604 * 1. Write is not sequential (write asynchronously) 605 * Write is sequential: 606 * 2. beginning of cluster - begin cluster 607 * 3. middle of a cluster - add to cluster 608 * 4. end of a cluster - asynchronously write cluster 609 */ 610void 611cluster_write(bp, filesize, seqcount) 612 struct buf *bp; 613 u_quad_t filesize; 614 int seqcount; 615{ 616 struct vnode *vp; 617 daddr_t lbn; 618 int maxclen, cursize; 619 int lblocksize; 620 int async; 621 622 vp = bp->b_vp; 623 if (vp->v_type == VREG) { 624 async = vp->v_mount->mnt_flag & MNT_ASYNC; 625 lblocksize = vp->v_mount->mnt_stat.f_iosize; 626 } else { 627 async = 0; 628 lblocksize = bp->b_bufsize; 629 } 630 lbn = bp->b_lblkno; 631 KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset")); 632 633 /* Initialize vnode to beginning of file. */ 634 if (lbn == 0) 635 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 636 637 if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 || 638 (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) { 639 maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1; 640 if (vp->v_clen != 0) { 641 /* 642 * Next block is not sequential. 643 * 644 * If we are not writing at end of file, the process 645 * seeked to another point in the file since its last 646 * write, or we have reached our maximum cluster size, 647 * then push the previous cluster. Otherwise try 648 * reallocating to make it sequential. 649 * 650 * Change to algorithm: only push previous cluster if 651 * it was sequential from the point of view of the 652 * seqcount heuristic, otherwise leave the buffer 653 * intact so we can potentially optimize the I/O 654 * later on in the buf_daemon or update daemon 655 * flush. 656 */ 657 cursize = vp->v_lastw - vp->v_cstart + 1; 658 if (((u_quad_t) bp->b_offset + lblocksize) != filesize || 659 lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) { 660 if (!async && seqcount > 0) { 661 cluster_wbuild_wb(vp, lblocksize, 662 vp->v_cstart, cursize); 663 } 664 } else { 665 struct buf **bpp, **endbp; 666 struct cluster_save *buflist; 667 668 buflist = cluster_collectbufs(vp, bp); 669 endbp = &buflist->bs_children 670 [buflist->bs_nchildren - 1]; 671 if (VOP_REALLOCBLKS(vp, buflist)) { 672 /* 673 * Failed, push the previous cluster 674 * if *really* writing sequentially 675 * in the logical file (seqcount > 1), 676 * otherwise delay it in the hopes that 677 * the low level disk driver can 678 * optimize the write ordering. 679 */ 680 for (bpp = buflist->bs_children; 681 bpp < endbp; bpp++) 682 brelse(*bpp); 683 free(buflist, M_SEGMENT); 684 if (seqcount > 1) { 685 cluster_wbuild_wb(vp, 686 lblocksize, vp->v_cstart, 687 cursize); 688 } 689 } else { 690 /* 691 * Succeeded, keep building cluster. 692 */ 693 for (bpp = buflist->bs_children; 694 bpp <= endbp; bpp++) 695 bdwrite(*bpp); 696 free(buflist, M_SEGMENT); 697 vp->v_lastw = lbn; 698 vp->v_lasta = bp->b_blkno; 699 return; 700 } 701 } 702 } 703 /* 704 * Consider beginning a cluster. If at end of file, make 705 * cluster as large as possible, otherwise find size of 706 * existing cluster. 707 */ 708 if ((vp->v_type == VREG) && 709 ((u_quad_t) bp->b_offset + lblocksize) != filesize && 710 (bp->b_blkno == bp->b_lblkno) && 711 (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) || 712 bp->b_blkno == -1)) { 713 bawrite(bp); 714 vp->v_clen = 0; 715 vp->v_lasta = bp->b_blkno; 716 vp->v_cstart = lbn + 1; 717 vp->v_lastw = lbn; 718 return; 719 } 720 vp->v_clen = maxclen; 721 if (!async && maxclen == 0) { /* I/O not contiguous */ 722 vp->v_cstart = lbn + 1; 723 bawrite(bp); 724 } else { /* Wait for rest of cluster */ 725 vp->v_cstart = lbn; 726 bdwrite(bp); 727 } 728 } else if (lbn == vp->v_cstart + vp->v_clen) { 729 /* 730 * At end of cluster, write it out if seqcount tells us we 731 * are operating sequentially, otherwise let the buf or 732 * update daemon handle it. 733 */ 734 bdwrite(bp); 735 if (seqcount > 1) 736 cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1); 737 vp->v_clen = 0; 738 vp->v_cstart = lbn + 1; 739 } else if (vm_page_count_severe()) { 740 /* 741 * We are low on memory, get it going NOW 742 */ 743 bawrite(bp); 744 } else { 745 /* 746 * In the middle of a cluster, so just delay the I/O for now. 747 */ 748 bdwrite(bp); 749 } 750 vp->v_lastw = lbn; 751 vp->v_lasta = bp->b_blkno; 752} 753 754 755/* 756 * This is an awful lot like cluster_rbuild...wish they could be combined. 757 * The last lbn argument is the current block on which I/O is being 758 * performed. Check to see that it doesn't fall in the middle of 759 * the current block (if last_bp == NULL). 760 */ 761int 762cluster_wbuild(vp, size, start_lbn, len) 763 struct vnode *vp; 764 long size; 765 daddr_t start_lbn; 766 int len; 767{ 768 struct buf *bp, *tbp; 769 int i, j, s; 770 int totalwritten = 0; 771 int dbsize = btodb(size); 772 773 GIANT_REQUIRED; 774 775 while (len > 0) { 776 s = splbio(); 777 /* 778 * If the buffer is not delayed-write (i.e. dirty), or it 779 * is delayed-write but either locked or inval, it cannot 780 * partake in the clustered write. 781 */ 782 VI_LOCK(vp); 783 if ((tbp = gbincore(vp, start_lbn)) == NULL || 784 (tbp->b_vflags & BV_BKGRDINPROG)) { 785 VI_UNLOCK(vp); 786 ++start_lbn; 787 --len; 788 splx(s); 789 continue; 790 } 791 if (BUF_LOCK(tbp, 792 LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) { 793 ++start_lbn; 794 --len; 795 splx(s); 796 continue; 797 } 798 if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) { 799 BUF_UNLOCK(tbp); 800 ++start_lbn; 801 --len; 802 splx(s); 803 continue; 804 } 805 bremfree(tbp); 806 tbp->b_flags &= ~B_DONE; 807 splx(s); 808 809 /* 810 * Extra memory in the buffer, punt on this buffer. 811 * XXX we could handle this in most cases, but we would 812 * have to push the extra memory down to after our max 813 * possible cluster size and then potentially pull it back 814 * up if the cluster was terminated prematurely--too much 815 * hassle. 816 */ 817 if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) != 818 (B_CLUSTEROK | B_VMIO)) || 819 (tbp->b_bcount != tbp->b_bufsize) || 820 (tbp->b_bcount != size) || 821 (len == 1) || 822 ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) { 823 totalwritten += tbp->b_bufsize; 824 bawrite(tbp); 825 ++start_lbn; 826 --len; 827 continue; 828 } 829 830 /* 831 * We got a pbuf to make the cluster in. 832 * so initialise it. 833 */ 834 TAILQ_INIT(&bp->b_cluster.cluster_head); 835 bp->b_bcount = 0; 836 bp->b_magic = tbp->b_magic; 837 bp->b_op = tbp->b_op; 838 bp->b_bufsize = 0; 839 bp->b_npages = 0; 840 if (tbp->b_wcred != NOCRED) 841 bp->b_wcred = crhold(tbp->b_wcred); 842 843 bp->b_blkno = tbp->b_blkno; 844 bp->b_lblkno = tbp->b_lblkno; 845 bp->b_offset = tbp->b_offset; 846 847 /* 848 * We are synthesizing a buffer out of vm_page_t's, but 849 * if the block size is not page aligned then the starting 850 * address may not be either. Inherit the b_data offset 851 * from the original buffer. 852 */ 853 bp->b_data = (char *)((vm_offset_t)bp->b_data | 854 ((vm_offset_t)tbp->b_data & PAGE_MASK)); 855 bp->b_flags |= B_CLUSTER | 856 (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT)); 857 bp->b_iodone = cluster_callback; 858 pbgetvp(vp, bp); 859 /* 860 * From this location in the file, scan forward to see 861 * if there are buffers with adjacent data that need to 862 * be written as well. 863 */ 864 for (i = 0; i < len; ++i, ++start_lbn) { 865 if (i != 0) { /* If not the first buffer */ 866 s = splbio(); 867 /* 868 * If the adjacent data is not even in core it 869 * can't need to be written. 870 */ 871 VI_LOCK(vp); 872 if ((tbp = gbincore(vp, start_lbn)) == NULL || 873 (tbp->b_vflags & BV_BKGRDINPROG)) { 874 VI_UNLOCK(vp); 875 splx(s); 876 break; 877 } 878 879 /* 880 * If it IS in core, but has different 881 * characteristics, or is locked (which 882 * means it could be undergoing a background 883 * I/O or be in a weird state), then don't 884 * cluster with it. 885 */ 886 if (BUF_LOCK(tbp, 887 LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, 888 VI_MTX(vp))) { 889 splx(s); 890 break; 891 } 892 893 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK | 894 B_INVAL | B_DELWRI | B_NEEDCOMMIT)) 895 != (B_DELWRI | B_CLUSTEROK | 896 (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) || 897 tbp->b_wcred != bp->b_wcred) { 898 BUF_UNLOCK(tbp); 899 splx(s); 900 break; 901 } 902 903 /* 904 * Check that the combined cluster 905 * would make sense with regard to pages 906 * and would not be too large 907 */ 908 if ((tbp->b_bcount != size) || 909 ((bp->b_blkno + (dbsize * i)) != 910 tbp->b_blkno) || 911 ((tbp->b_npages + bp->b_npages) > 912 (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) { 913 BUF_UNLOCK(tbp); 914 splx(s); 915 break; 916 } 917 /* 918 * Ok, it's passed all the tests, 919 * so remove it from the free list 920 * and mark it busy. We will use it. 921 */ 922 bremfree(tbp); 923 tbp->b_flags &= ~B_DONE; 924 splx(s); 925 } /* end of code for non-first buffers only */ 926 /* check for latent dependencies to be handled */ 927 if ((LIST_FIRST(&tbp->b_dep)) != NULL) { 928 tbp->b_iocmd = BIO_WRITE; 929 buf_start(tbp); 930 } 931 /* 932 * If the IO is via the VM then we do some 933 * special VM hackery (yuck). Since the buffer's 934 * block size may not be page-aligned it is possible 935 * for a page to be shared between two buffers. We 936 * have to get rid of the duplication when building 937 * the cluster. 938 */ 939 if (tbp->b_flags & B_VMIO) { 940 vm_page_t m; 941 942 if (i != 0) { /* if not first buffer */ 943 for (j = 0; j < tbp->b_npages; j += 1) { 944 m = tbp->b_pages[j]; 945 if (m->flags & PG_BUSY) { 946 bqrelse(tbp); 947 goto finishcluster; 948 } 949 } 950 } 951 if (tbp->b_object != NULL) 952 VM_OBJECT_LOCK(tbp->b_object); 953 vm_page_lock_queues(); 954 for (j = 0; j < tbp->b_npages; j += 1) { 955 m = tbp->b_pages[j]; 956 vm_page_io_start(m); 957 vm_object_pip_add(m->object, 1); 958 if ((bp->b_npages == 0) || 959 (bp->b_pages[bp->b_npages - 1] != m)) { 960 bp->b_pages[bp->b_npages] = m; 961 bp->b_npages++; 962 } 963 } 964 vm_page_unlock_queues(); 965 if (tbp->b_object != NULL) 966 VM_OBJECT_UNLOCK(tbp->b_object); 967 } 968 bp->b_bcount += size; 969 bp->b_bufsize += size; 970 971 s = splbio(); 972 bundirty(tbp); 973 tbp->b_flags &= ~B_DONE; 974 tbp->b_ioflags &= ~BIO_ERROR; 975 tbp->b_flags |= B_ASYNC; 976 tbp->b_iocmd = BIO_WRITE; 977 reassignbuf(tbp, tbp->b_vp); /* put on clean list */ 978 VI_LOCK(tbp->b_vp); 979 ++tbp->b_vp->v_numoutput; 980 VI_UNLOCK(tbp->b_vp); 981 splx(s); 982 BUF_KERNPROC(tbp); 983 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, 984 tbp, b_cluster.cluster_entry); 985 } 986 finishcluster: 987 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), 988 (vm_page_t *) bp->b_pages, bp->b_npages); 989 if (bp->b_bufsize > bp->b_kvasize) 990 panic( 991 "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n", 992 bp->b_bufsize, bp->b_kvasize); 993 bp->b_kvasize = bp->b_bufsize; 994 totalwritten += bp->b_bufsize; 995 bp->b_dirtyoff = 0; 996 bp->b_dirtyend = bp->b_bufsize; 997 bawrite(bp); 998 999 len -= i; 1000 } 1001 return totalwritten; 1002} 1003 1004/* 1005 * Collect together all the buffers in a cluster. 1006 * Plus add one additional buffer. 1007 */ 1008static struct cluster_save * 1009cluster_collectbufs(vp, last_bp) 1010 struct vnode *vp; 1011 struct buf *last_bp; 1012{ 1013 struct cluster_save *buflist; 1014 struct buf *bp; 1015 daddr_t lbn; 1016 int i, len; 1017 1018 len = vp->v_lastw - vp->v_cstart + 1; 1019 buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist), 1020 M_SEGMENT, M_WAITOK); 1021 buflist->bs_nchildren = 0; 1022 buflist->bs_children = (struct buf **) (buflist + 1); 1023 for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) { 1024 (void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp); 1025 buflist->bs_children[i] = bp; 1026 if (bp->b_blkno == bp->b_lblkno) 1027 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, 1028 NULL, NULL); 1029 } 1030 buflist->bs_children[i] = bp = last_bp; 1031 if (bp->b_blkno == bp->b_lblkno) 1032 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, 1033 NULL, NULL); 1034 buflist->bs_nchildren = i + 1; 1035 return (buflist); 1036} 1037