/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: head/sys/kern/vfs_cluster.c 112367 2003-03-18 08:45:25Z phk $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
static struct buf *
	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 8;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");
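
/*
 * Illustrative note (added; not in the original source): both knobs above
 * are ordinary read/write sysctls, so with the declarations as given they
 * can be inspected and tuned at run time, e.g. "sysctl vfs.read_max=16"
 * to widen the read-ahead window or "sysctl vfs.write_behind=0" to turn
 * clustered write-behind off.  The values here are hypothetical examples,
 * not recommendations.
 */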

/* Page expended to mark partially backed buffers */
extern vm_page_t bogus_page;

/*
 * Number of physical bufs (pbufs) this subsystem is allowed.
 * Manipulated by vm_pager.c
 */
extern int cluster_pbuf_freecnt;

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;
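
	/*
	 * Worked example (added for illustration; the numbers are
	 * hypothetical): with size = 16K blocks and mnt_iosize_max = 128K,
	 *
	 *	racluster = 128K / 16K = 8 blocks per cluster-sized I/O.
	 *
	 * If the heuristic reports seqcount = 16 while read_max holds its
	 * default of 8 (and nbuf/8 exceeds both), then
	 *
	 *	maxra = min(nbuf/8, min(read_max, seqcount)) = 8
	 *
	 * blocks of read-ahead, and the clamp above trims maxra so the
	 * window never extends past filesize.
	 */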

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, 0);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore and the b_usecount increment
			 * below.  We opt to keep the spl out of the loop
			 * for efficiency.
			 */
			s = splbio();
			VI_LOCK(vp);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(vp, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					rbp->b_flags |= B_RAM;
			}
			VI_UNLOCK(vp);
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
		if (error)
			return (error);
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, NULL);
			lblkno += (rbp->b_bufsize / size);
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, 0);
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
			lblkno += 1;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		(void) VOP_STRATEGY(vp, rbp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}
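
/*
 * Hypothetical caller sketch (added for illustration; ffs_read() is the
 * real consumer in this era of the tree, but the fragment below is a
 * simplification, not its actual code).  A filesystem read path picks
 * cluster_read() over bread() when the mount permits clustering:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if (vp->v_mount->mnt_flag & MNT_NOCLUSTERR)
 *		error = bread(vp, lbn, size, NOCRED, &bp);
 *	else
 *		error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *		    uio->uio_resid, seqcount, &bp);
 *
 * On success the caller uiomove()s data out of bp->b_data and releases
 * the buffer with bqrelse() as usual.
 */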

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	GIANT_REQUIRED;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT);

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid)
					break;
			}
			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		vm_page_lock_queues();
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		vm_page_unlock_queues();
		/*
		 * XXX shouldn't this be += size for both, like in
		 * cluster_wbuild()?
		 *
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n",
			    tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n",
			    tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace such pages with bogus_page so
	 * the device transfer leaves them untouched.
	 */
	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL) {
			bp->b_pages[j] = bogus_page;
		}
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}
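
/*
 * Worked example (added for illustration; sizes are hypothetical): with
 * size = 8K blocks, PAGE_SIZE = 4K and run = 4, the synthesized pbuf
 * describes one 32K transfer backed by up to eight vm_page_t's taken
 * from the four component buffers.  Any page that is already fully valid
 * (VM_PAGE_BITS_ALL) is swapped for bogus_page, so the device "reads"
 * into the throwaway page instead of clobbering good cached data, while
 * the invalid pages around it are still filled by the single large I/O.
 */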

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	GIANT_REQUIRED;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *	write_behind = 0	write behind disabled
 *	write_behind = 1	write behind normal (default)
 *	write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* FALLTHROUGH */
	default:
		break;
	}
	return (r);
}
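
/*
 * Worked example (added for illustration; the numbers are hypothetical):
 * with write_behind = 2 and a request to flush len = 4 blocks starting
 * at start_lbn = 12, the backed-off mode first rewinds one window,
 * start_lbn = 12 - 4 = 8, then falls through and writes blocks 8..11.
 * This keeps the most recently written window delayed behind the current
 * position rather than pushing it out immediately; start_lbn < len just
 * means no full window exists behind us yet, so nothing is written.
 */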

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * has seeked to another point in the file since its
			 * last write, or we have reached our maximum cluster
			 * size, then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to the algorithm: only push the previous
			 * cluster if it was sequential from the point of view
			 * of the seqcount heuristic, otherwise leave the
			 * buffer intact so we can potentially optimize the
			 * I/O later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
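
/*
 * Worked example (added for illustration; block numbers are hypothetical):
 * a process appending 16K blocks sequentially with mnt_iosize_max = 128K
 * gives maxclen = 128K/16K - 1 = 7.  The write of lbn 0 begins a cluster
 * (v_cstart = 0, v_clen = 7, case 2) and is bdwrite()n; writes of
 * lbn 1..6 fall into case 3 and are simply delayed; the write of
 * lbn 7 == v_cstart + v_clen hits case 4, so the whole 8-block cluster is
 * pushed out through cluster_wbuild_wb() as a single 128K I/O.  A
 * non-sequential write (case 1) would instead be bawrite()n on its own.
 */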

/*
 * This is an awful lot like cluster_rbuild... wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	GIANT_REQUIRED;

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		VI_LOCK(vp);
		if ((tbp = gbincore(vp, start_lbn)) == NULL) {
			VI_UNLOCK(vp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, VI_MTX(vp))) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		if ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) !=
		    B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		     (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in,
		 * so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				VI_LOCK(vp);
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					VI_UNLOCK(vp);
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    VI_MTX(vp))) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				     tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				     (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy.  We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL)
				buf_start(tbp);
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				vm_page_lock_queues();
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				vm_page_unlock_queues();
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			VI_LOCK(tbp->b_vp);
			++tbp->b_vp->v_numoutput;
			VI_UNLOCK(tbp->b_vp);
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect all the buffers in a cluster, plus add one
 * additional buffer (last_bp) to the end.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}