vfs_cluster.c revision 32724
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.50 1998/01/06 05:16:01 dyson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#if defined(CLUSTERDEBUG)
#include <sys/sysctl.h>
#include <sys/kernel.h>
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, "");
#endif

#ifdef notyet_block_reallocation_enabled
static struct cluster_save *
	cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp));
#endif
static struct buf *
	cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn,
	    daddr_t blkno, long size, int run, struct buf *fbp));

extern vm_page_t bogus_page;

/*
 * Maximum number of blocks for read-ahead.
 */
#define MAXRA 32

/*
 * This replaces bread.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	long totread;
	int seqcount;
	struct buf **bpp;
{
	struct buf *bp, *rbp, *reqbp;
	daddr_t blkno, origblkno;
	int error, num_ra;
	int i;
	int maxra, racluster;
	long origtotread;

	error = 0;
	if (vp->v_maxio == 0)
		vp->v_maxio = DFLTPHYS;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
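	/*
	 * Worked example (added commentary, not in the original): with an
	 * 8K filesystem block size and the default v_maxio of DFLTPHYS
	 * (64K), racluster is 64K/8K = 8 blocks per cluster-sized
	 * transfer.  A 32K request (totread/size == 4) then yields
	 * maxra = 2*8 + 4 = 20 blocks of read-ahead, clipped to MAXRA
	 * (32) and to nbuf/8 so that read-ahead cannot monopolize the
	 * buffer cache.
	 */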
	racluster = vp->v_maxio / size;
	maxra = 2 * racluster + (totread / size);
	if (maxra > MAXRA)
		maxra = MAXRA;
	if (maxra > nbuf/8)
		maxra = nbuf/8;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
	origblkno = lblkno;
	origtotread = totread;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			int s;
			struct buf *tbp;
			bp->b_flags &= ~B_RAM;
			/*
			 * We do the spl here so that there is no window
			 * between the incore and the b_usecount increment
			 * below.  We opt to keep the spl out of the loop
			 * for efficiency.
			 */
			s = splbio();
			for (i = 1; i < maxra; i++) {

				if (!(tbp = incore(vp, lblkno + i))) {
					break;
				}

				/*
				 * Set another read-ahead mark so we know to
				 * check again.
				 */
				if (((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
					tbp->b_flags |= B_RAM;

#if 0
				if (tbp->b_usecount == 0) {
					/*
					 * Make sure that the soon-to-be used
					 * readaheads are still there.  The
					 * getblk/bqrelse pair will boost the
					 * priority of the buffer.
					 */
					tbp = getblk(vp, lblkno + i, size, 0, 0);
					bqrelse(tbp);
				}
#endif
			}
			splx(s);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	} else {
		u_quad_t firstread;
		firstread = (u_quad_t) lblkno * size;
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		if (totread > size) {
			int nblks = 0;
			int ncontigafter;
			while (totread > 0) {
				nblks++;
				totread -= size;
			}
			if (nblks == 1)
				goto single_block_read;
			if (nblks > racluster)
				nblks = racluster;

			error = VOP_BMAP(vp, lblkno, NULL,
			    &blkno, &ncontigafter, NULL);
			if (error)
				goto single_block_read;
			if (blkno == -1)
				goto single_block_read;
			if (ncontigafter == 0)
				goto single_block_read;
			if (ncontigafter + 1 < nblks)
				nblks = ncontigafter + 1;

			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, bp);
			lblkno += nblks;
		} else {
single_block_read:
			/*
			 * if it isn't in the cache, then get a chunk from
			 * disk if sequential, otherwise just get the block.
			 */
			bp->b_flags |= B_READ | B_RAM;
			lblkno += 1;
		}
	}
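	/*
	 * Added note: at this point lblkno has been advanced past every
	 * block that is already resident or already queued above, so the
	 * window test below only issues read-ahead while the next block
	 * still lies within seqcount blocks of the original request, i.e.
	 * while the access pattern still looks sequential.
	 */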
	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	rbp = NULL;
	/* if (seqcount && (lblkno < (origblkno + maxra))) { */
	if (seqcount && (lblkno < (origblkno + seqcount))) {
		/*
		 * we now build the read-ahead buffer if it is desirable.
		 */
		if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
		    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
		    blkno != -1) {
			int nblksread;
			int ntoread = num_ra + 1;
			nblksread = (origtotread + size - 1) / size;
			if (seqcount < nblksread)
				seqcount = nblksread;
			if (seqcount < ntoread)
				ntoread = seqcount;
			if (num_ra) {
				rbp = cluster_rbuild(vp, filesize, lblkno,
				    blkno, size, ntoread, NULL);
			} else {
				rbp = getblk(vp, lblkno, size, 0, 0);
				rbp->b_flags |= B_READ | B_ASYNC | B_RAM;
				rbp->b_blkno = blkno;
			}
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI)) {
			panic("cluster_read: DONE bp");
		} else {
#if defined(CLUSTERDEBUG)
			if (rcluster)
				printf("S(%d,%d,%d) ",
				    bp->b_lblkno, bp->b_bcount, seqcount);
#endif
			if ((bp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		if (error) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			bqrelse(rbp);
		} else {
#if defined(CLUSTERDEBUG)
			if (rcluster) {
				if (bp)
					printf("A+(%d,%d,%d,%d) ",
					    rbp->b_lblkno, rbp->b_bcount,
					    rbp->b_lblkno - origblkno,
					    seqcount);
				else
					printf("A(%d,%d,%d,%d) ",
					    rbp->b_lblkno, rbp->b_bcount,
					    rbp->b_lblkno - origblkno,
					    seqcount);
			}
#endif

			if ((rbp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (reqbp)
		return (biowait(reqbp));
	else
		return (error);
}
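/*
 * Example caller (a sketch added for illustration; ip, lbn, seqcount and
 * uio are hypothetical locals of a filesystem read path, not names from
 * this file):
 *
 *	error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *	    uio->uio_resid, seqcount, &bp);
 *
 * i.e. a filesystem substitutes cluster_read() for bread() when it wants
 * clustered read-ahead.
 */

/*
 * Added note on the mechanism below: cluster_rbuild() borrows a pbuf via
 * trypbuf() and stitches the VM pages of the component buffers into it,
 * so a single strategy call covers the whole contiguous run.  Pages that
 * are already fully valid are replaced by the throwaway "bogus_page" in
 * the transfer mapping, so the device cannot overwrite cached data that
 * may be newer than what is on disk.
 */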
/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	struct buf *fbp;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_flags |= B_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_READ | B_RAM;
	}

	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf();
	if (bp == 0)
		return tbp;

	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (vp->v_maxio == 0)
		vp->v_maxio = DFLTPHYS;
	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_maxio)
				break;

			if (incore(vp, lbn + i))
				break;

			tbp = getblk(vp, lbn + i, size, 0, 0);

			if ((tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid) {
					break;
				}
			}

			if (j != tbp->b_npages) {
				/*
				 * force buffer to be re-constituted later
				 */
				tbp->b_flags |= B_RELBUF;
				brelse(tbp);
				break;
			}

			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;
			tbp->b_flags |= B_READ | B_ASYNC;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
		    tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
			if ((bp->b_npages == 0) ||
			    (bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		bp->b_bcount += tbp->b_bcount;
		bp->b_bufsize += tbp->b_bufsize;
	}

	for (j = 0; j < bp->b_npages; j++) {
		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
		    VM_PAGE_BITS_ALL)
			bp->b_pages[j] = bogus_page;
	}
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
	    (vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}
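/*
 * Added note: the per-page busy and paging_in_progress references taken
 * in cluster_rbuild() above (and in cluster_wbuild() below) are what keep
 * the pages from being reclaimed while the transfer is in flight; they
 * should be released on the biodone() path as each component B_VMIO
 * buffer is completed from cluster_callback().
 */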
/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
	    tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		} else
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
		biodone(tbp);
	}
	relpbuf(bp);
}
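/*
 * Worked example (added commentary, assuming an 8K block size, a 64K
 * v_maxio, and blocks already mapped contiguously on disk): a sequential
 * writer dirties lbn 0; cluster_write() starts a cluster with
 * v_cstart = 0 and v_clen = 64K/8K - 1 = 7, and the buffer is
 * bdwrite()n.  Writes to lbn 1..6 fall in the middle of the cluster and
 * are simply delayed.  The write to lbn 7, which equals
 * v_cstart + v_clen, completes the cluster and cluster_wbuild() pushes
 * all eight blocks as one I/O.
 */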
/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	if (vp->v_maxio == 0)
		vp->v_maxio = DFLTPHYS;
	if (vp->v_type == VREG) {
		async = vp->v_mount->mnt_flag & MNT_ASYNC;
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_maxio / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.  Otherwise try
			 * reallocating to make it sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
#ifndef notyet_block_reallocation_enabled
			if (((u_quad_t)(lbn + 1) * lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 ||
			    vp->v_clen <= cursize) {
				if (!async)
					cluster_wbuild(vp, lblocksize,
					    vp->v_cstart, cursize);
			}
#else
			if ((lbn + 1) * lblocksize != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async)
					cluster_wbuild(vp, lblocksize,
					    vp->v_cstart, cursize);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					cluster_wbuild(vp, lblocksize,
					    vp->v_cstart, cursize);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					    bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
#endif /* notyet_block_reallocation_enabled */
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) (lbn + 1) * lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		bdwrite(bp);
		cluster_wbuild(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
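/*
 * Added note: in the scan below, a buffer found by gbincore() joins a
 * cluster only when B_DELWRI is set and both B_INVAL and B_BUSY are
 * clear, i.e. only idle delayed-write buffers are eligible; anything
 * else simply advances start_lbn past the hole.
 */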
/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Write out all the contiguous delayed-write buffers for the vnode vp,
 * starting at logical block start_lbn, for at most len blocks of the
 * given size, and return the number of bytes pushed.
 */
int
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int totalwritten = 0;
	int dbsize = btodb(size);

	while (len > 0) {
		s = splbio();
		if (((tbp = gbincore(vp, start_lbn)) == NULL) ||
		    ((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
			++start_lbn;
			--len;
			splx(s);
			continue;
		}
		bremfree(tbp);
		tbp->b_flags |= B_BUSY;
		tbp->b_flags &= ~B_DONE;
		splx(s);

		/*
		 * Extra memory in the buffer, punt on this buffer.  XXX we
		 * could handle this in most cases, but we would have to push
		 * the extra memory down to after our max possible cluster
		 * size and then potentially pull it back up if the cluster
		 * was terminated prematurely--too much hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) ||
		    (tbp->b_bcount != tbp->b_bufsize) ||
		    (tbp->b_bcount != size) ||
		    len == 1) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		bp = trypbuf();
		if (bp == NULL) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED) {
			bp->b_wcred = tbp->b_wcred;
			crhold(bp->b_wcred);
		}

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		(vm_offset_t) bp->b_data |=
		    ((vm_offset_t) tbp->b_data) & PAGE_MASK;
		bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER |
		    (tbp->b_flags & (B_VMIO|B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);

		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) {
				s = splbio();
				if ((tbp = gbincore(vp, start_lbn)) == NULL) {
					splx(s);
					break;
				}

				if ((tbp->b_flags & (B_VMIO|B_CLUSTEROK|
				    B_INVAL|B_BUSY|B_DELWRI|B_NEEDCOMMIT)) !=
				    (B_DELWRI|B_CLUSTEROK|
				     (bp->b_flags & (B_VMIO|B_NEEDCOMMIT)))) {
					splx(s);
					break;
				}

				if (tbp->b_wcred != bp->b_wcred) {
					splx(s);
					break;
				}

				if ((tbp->b_bcount != size) ||
				    ((bp->b_blkno + dbsize * i) != tbp->b_blkno) ||
				    ((tbp->b_npages + bp->b_npages) >
				     (vp->v_maxio / PAGE_SIZE))) {
					splx(s);
					break;
				}
				bremfree(tbp);
				tbp->b_flags |= B_BUSY;
				tbp->b_flags &= ~B_DONE;
				splx(s);
			}
			if (tbp->b_flags & B_VMIO) {
				for (j = 0; j < tbp->b_npages; j += 1) {
					vm_page_t m;
					m = tbp->b_pages[j];
					++m->busy;
					++m->object->paging_in_progress;
					if ((bp->b_npages == 0) ||
					    (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;

			--numdirtybuffers;
			tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
			tbp->b_flags |= B_ASYNC;
			s = splbio();
			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
			++tbp->b_vp->v_numoutput;
			splx(s);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			    tbp, b_cluster.cluster_entry);
		}
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *) bp->b_pages, bp->b_npages);
		if (bp->b_bufsize > bp->b_kvasize)
			panic("cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return totalwritten;
}
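/*
 * Added note: the helper below is compiled only when block reallocation
 * is enabled.  It rereads the current v_cstart..v_lastw run with bread()
 * and appends last_bp, so that VOP_REALLOCBLKS() can examine the whole
 * prospective cluster at once.
 */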
#ifdef notyet_block_reallocation_enabled
/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
#endif /* notyet_block_reallocation_enabled */