vfs_cluster.c revision 75580
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $FreeBSD: head/sys/kern/vfs_cluster.c 75580 2001-04-17 08:56:39Z phk $
 */

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");

static struct cluster_save *
	cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp));
static struct buf *
	cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp));

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, "");

extern vm_page_t bogus_page;

extern int cluster_pbuf_freecnt;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int error, num_ra;
	int i;
	int maxra, racluster;
	long origtotread;

	error = 0;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = 2 * racluster + (totread / size);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf / 8)
		maxra = nbuf / 8;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
	origblkno = lblkno;
	origtotread = totread;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore and the b_usecount increment
			 * below.  We opt to keep the spl out of the loop
			 * for efficiency.
			 */
			s = splbio();
			for (i = 1; i < maxra; i++) {

				if (!(tbp = incore(vp, lblkno + i))) {
					break;
				}

				/*
				 * Set another read-ahead mark so we know
				 * to check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					tbp->b_flags |= B_RAM;
			}
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	} else {
		off_t firstread = bp->b_offset;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		if (totread > size) {
			int nblks = 0;
			int ncontigafter;
			while (totread > 0) {
				nblks++;
				totread -= size;
			}
			if (nblks == 1)
				goto single_block_read;
			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontigafter, NULL);
			if (error)
				goto single_block_read;
			if (blkno == -1)
				goto single_block_read;
			if (ncontigafter == 0)
				goto single_block_read;
			if (ncontigafter + 1 < nblks)
				nblks = ncontigafter + 1;

			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	rbp = NULL;
	if (seqcount && (lblkno < (origblkno + seqcount))) {
		/*
		 * we now build the read-ahead buffer if it is desirable.
		 */
		if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
		    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
		    blkno != -1) {
			int nblksread;
			int ntoread = num_ra + 1;
			nblksread = (origtotread + size - 1) / size;
			if (seqcount < nblksread)
				seqcount = nblksread;
			if (seqcount < ntoread)
				ntoread = seqcount;
			if (num_ra) {
				rbp = cluster_rbuild(vp, filesize, lblkno,
				    blkno, size, ntoread, NULL);
			} else {
				rbp = getblk(vp, lblkno, size, 0, 0);
				rbp->b_flags |= B_ASYNC | B_RAM;
				rbp->b_iocmd = BIO_READ;
				rbp->b_blkno = blkno;
			}
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
#if defined(CLUSTERDEBUG)
		if (rcluster)
			printf("S(%ld,%ld,%d) ",
			    (long)bp->b_lblkno, bp->b_bcount, seqcount);
#endif
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		error = VOP_STRATEGY(vp, bp);
		curproc->p_stats->p_ru.ru_inblock++;
	}

	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		if (error) {
			rbp->b_flags &= ~B_ASYNC;
			brelse(rbp);
		} else if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
		} else {
#if defined(CLUSTERDEBUG)
			if (rcluster) {
				if (bp)
					printf("A+(%ld,%ld,%ld,%d) ",
					    (long)rbp->b_lblkno, rbp->b_bcount,
					    (long)(rbp->b_lblkno - origblkno),
					    seqcount);
				else
					printf("A(%ld,%ld,%ld,%d) ",
					    (long)rbp->b_lblkno, rbp->b_bcount,
					    (long)(rbp->b_lblkno - origblkno),
					    seqcount);
			}
#endif

			if ((rbp->b_flags & B_CLUSTER) == 0) {
				vfs_busy_pages(rbp, 0);
			}
			rbp->b_flags &= ~B_INVAL;
			rbp->b_ioflags &= ~BIO_ERROR;
			if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
				BUF_KERNPROC(rbp);
			(void) VOP_STRATEGY(vp, rbp);
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != filesize %ld\n",
	    size, vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	bp->b_data = (char *)((vm_offset_t)bp->b_data |
	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max)
				break;

			if ((tbp = incore(vp, lbn + i)) != NULL) {
				if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT))
					break;
				BUF_UNLOCK(tbp);

				for (j = 0; j < tbp->b_npages; j++)
					if (tbp->b_pages[j]->valid)
						break;

				if (j != tbp->b_npages)
					break;

				if (tbp->b_bcount != size)
					break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0);

			/*
			 * If the buffer is already fully valid or locked
			 * (which could also mean that a background write is
			 * in progress), or the buffer is not backed by VMIO,
			 * stop.
			 */
			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid)
					break;
			}

			if (j != tbp->b_npages) {
				bqrelse(tbp);
				break;
			}

			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_io_start(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		bp->b_bcount += tbp->b_bcount;
		bp->b_bufsize += tbp->b_bufsize;
	}

	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL)
			bp->b_pages[j] = bogus_page;
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
		}
		bufdone(tbp);
	}
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
{
	int r = 0;

	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* fall through */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len);
		/* fall through */
	default:
		/* fall through */
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Three cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize, seqcount)
	struct buf *bp;
	u_quad_t filesize;
	int seqcount;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1)
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	while (len > 0) {
		s = splbio();
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
		    ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI) ||
		    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		    (B_CLUSTEROK | B_VMIO)) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    (len == 1) ||
		    ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in,
		 * so initialize it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_magic = tbp->b_magic;
		bp->b_op = tbp->b_op;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED) {
			bp->b_wcred = tbp->b_wcred;
			crhold(bp->b_wcred);
		}

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		bp->b_flags |= B_CLUSTER |
		    (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				s = splbio();
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					splx(s);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    (tbp->b_flags & B_LOCKED) ||
				    tbp->b_wcred != bp->b_wcred ||
				    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) {
					splx(s);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					splx(s);
					break;
				}
				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
				splx(s);
			} /* end of code for non-first buffers only */
			/* check for latent dependencies to be handled */
			if ((LIST_FIRST(&tbp->b_dep)) != NULL)
				buf_start(tbp);
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery. (yuck)
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (m->flags & PG_BUSY) {
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}

				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_io_start(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			s = splbio();
			bundirty(tbp);
			tbp->b_flags &= ~B_DONE;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_WRITE;
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			++tbp->b_vp->v_numoutput;
			splx(s);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
			    NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
		    NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}