vfs_cluster.c revision 37384
1/*- 2 * Copyright (c) 1993 3 * The Regents of the University of California. All rights reserved. 4 * Modifications/enhancements: 5 * Copyright (c) 1995 John S. Dyson. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94 36 * $Id: vfs_cluster.c,v 1.62 1998/05/21 07:47:42 dyson Exp $ 37 */ 38 39#include "opt_debug_cluster.h" 40 41#include <sys/param.h> 42#include <sys/systm.h> 43#include <sys/proc.h> 44#include <sys/buf.h> 45#include <sys/vnode.h> 46#include <sys/mount.h> 47#include <sys/resourcevar.h> 48#include <vm/vm.h> 49#include <vm/vm_prot.h> 50#include <vm/vm_object.h> 51#include <vm/vm_page.h> 52 53#if defined(CLUSTERDEBUG) 54#include <sys/sysctl.h> 55#include <sys/kernel.h> 56static int rcluster= 0; 57SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, ""); 58#endif 59 60#ifdef notyet_block_reallocation_enabled 61static struct cluster_save * 62 cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp)); 63#endif 64static struct buf * 65 cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn, 66 daddr_t blkno, long size, int run, struct buf *fbp)); 67 68extern vm_page_t bogus_page; 69 70/* 71 * Maximum number of blocks for read-ahead. 72 */ 73#define MAXRA 32 74 75/* 76 * This replaces bread. 77 */ 78int 79cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp) 80 struct vnode *vp; 81 u_quad_t filesize; 82 daddr_t lblkno; 83 long size; 84 struct ucred *cred; 85 long totread; 86 int seqcount; 87 struct buf **bpp; 88{ 89 struct buf *bp, *rbp, *reqbp; 90 daddr_t blkno, origblkno; 91 int error, num_ra; 92 int i; 93 int maxra, racluster; 94 long origtotread; 95 96 error = 0; 97 if (vp->v_maxio == 0) 98 vp->v_maxio = DFLTPHYS; 99 100 /* 101 * Try to limit the amount of read-ahead by a few 102 * ad-hoc parameters. This needs work!!! 103 */ 104 racluster = vp->v_maxio/size; 105 maxra = 2 * racluster + (totread / size); 106 if (maxra > MAXRA) 107 maxra = MAXRA; 108 if (maxra > nbuf/8) 109 maxra = nbuf/8; 110 111 /* 112 * get the requested block 113 */ 114 *bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0); 115 origblkno = lblkno; 116 origtotread = totread; 117 118 /* 119 * if it is in the cache, then check to see if the reads have been 120 * sequential. If they have, then try some read-ahead, otherwise 121 * back-off on prospective read-aheads. 122 */ 123 if (bp->b_flags & B_CACHE) { 124 if (!seqcount) { 125 return 0; 126 } else if ((bp->b_flags & B_RAM) == 0) { 127 return 0; 128 } else { 129 int s; 130 struct buf *tbp; 131 bp->b_flags &= ~B_RAM; 132 /* 133 * We do the spl here so that there is no window 134 * between the incore and the b_usecount increment 135 * below. We opt to keep the spl out of the loop 136 * for efficiency. 137 */ 138 s = splbio(); 139 for(i=1;i<maxra;i++) { 140 141 if (!(tbp = incore(vp, lblkno+i))) { 142 break; 143 } 144 145 /* 146 * Set another read-ahead mark so we know to check 147 * again. 148 */ 149 if (((i % racluster) == (racluster - 1)) || 150 (i == (maxra - 1))) 151 tbp->b_flags |= B_RAM; 152 153 if ((tbp->b_usecount < 1) && 154 ((tbp->b_flags & B_BUSY) == 0) && 155 (tbp->b_qindex == QUEUE_LRU)) { 156 TAILQ_REMOVE(&bufqueues[QUEUE_LRU], tbp, b_freelist); 157 TAILQ_INSERT_TAIL(&bufqueues[QUEUE_LRU], tbp, b_freelist); 158 } 159 } 160 splx(s); 161 if (i >= maxra) { 162 return 0; 163 } 164 lblkno += i; 165 } 166 reqbp = bp = NULL; 167 } else { 168 off_t firstread; 169 firstread = bp->b_offset; 170#ifdef DIAGNOSTIC 171 if (bp->b_offset == NOOFFSET) 172 panic("cluster_read: no buffer offset"); 173#endif 174 if (firstread + totread > filesize) 175 totread = filesize - firstread; 176 if (totread > size) { 177 int nblks = 0; 178 int ncontigafter; 179 while (totread > 0) { 180 nblks++; 181 totread -= size; 182 } 183 if (nblks == 1) 184 goto single_block_read; 185 if (nblks > racluster) 186 nblks = racluster; 187 188 error = VOP_BMAP(vp, lblkno, NULL, 189 &blkno, &ncontigafter, NULL); 190 if (error) 191 goto single_block_read; 192 if (blkno == -1) 193 goto single_block_read; 194 if (ncontigafter == 0) 195 goto single_block_read; 196 if (ncontigafter + 1 < nblks) 197 nblks = ncontigafter + 1; 198 199 bp = cluster_rbuild(vp, filesize, lblkno, 200 blkno, size, nblks, bp); 201 lblkno += (bp->b_bufsize / size); 202 } else { 203single_block_read: 204 /* 205 * if it isn't in the cache, then get a chunk from 206 * disk if sequential, otherwise just get the block. 207 */ 208 bp->b_flags |= B_READ | B_RAM; 209 lblkno += 1; 210 } 211 } 212 213 /* 214 * if we have been doing sequential I/O, then do some read-ahead 215 */ 216 rbp = NULL; 217 if (seqcount && (lblkno < (origblkno + seqcount))) { 218 /* 219 * we now build the read-ahead buffer if it is desirable. 220 */ 221 if (((u_quad_t)(lblkno + 1) * size) <= filesize && 222 !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) && 223 blkno != -1) { 224 int nblksread; 225 int ntoread = num_ra + 1; 226 nblksread = (origtotread + size - 1) / size; 227 if (seqcount < nblksread) 228 seqcount = nblksread; 229 if (seqcount < ntoread) 230 ntoread = seqcount; 231 if (num_ra) { 232 rbp = cluster_rbuild(vp, filesize, lblkno, 233 blkno, size, ntoread, NULL); 234 } else { 235 rbp = getblk(vp, lblkno, size, 0, 0); 236 rbp->b_flags |= B_READ | B_ASYNC | B_RAM; 237 rbp->b_blkno = blkno; 238 } 239 } 240 } 241 242 /* 243 * handle the synchronous read 244 */ 245 if (bp) { 246#if defined(CLUSTERDEBUG) 247 if (rcluster) 248 printf("S(%d,%d,%d) ", 249 bp->b_lblkno, bp->b_bcount, seqcount); 250#endif 251 if ((bp->b_flags & B_CLUSTER) == 0) 252 vfs_busy_pages(bp, 0); 253 error = VOP_STRATEGY(vp, bp); 254 curproc->p_stats->p_ru.ru_inblock++; 255 } 256 257 /* 258 * and if we have read-aheads, do them too 259 */ 260 if (rbp) { 261 if (error) { 262 rbp->b_flags &= ~(B_ASYNC | B_READ); 263 brelse(rbp); 264 } else if (rbp->b_flags & B_CACHE) { 265 rbp->b_flags &= ~(B_ASYNC | B_READ); 266 bqrelse(rbp); 267 } else { 268#if defined(CLUSTERDEBUG) 269 if (rcluster) { 270 if (bp) 271 printf("A+(%d,%d,%d,%d) ", 272 rbp->b_lblkno, rbp->b_bcount, 273 rbp->b_lblkno - origblkno, 274 seqcount); 275 else 276 printf("A(%d,%d,%d,%d) ", 277 rbp->b_lblkno, rbp->b_bcount, 278 rbp->b_lblkno - origblkno, 279 seqcount); 280 } 281#endif 282 283 if ((rbp->b_flags & B_CLUSTER) == 0) 284 vfs_busy_pages(rbp, 0); 285 (void) VOP_STRATEGY(vp, rbp); 286 curproc->p_stats->p_ru.ru_inblock++; 287 } 288 } 289 if (reqbp) 290 return (biowait(reqbp)); 291 else 292 return (error); 293} 294 295/* 296 * If blocks are contiguous on disk, use this to provide clustered 297 * read ahead. We will read as many blocks as possible sequentially 298 * and then parcel them up into logical blocks in the buffer hash table. 299 */ 300static struct buf * 301cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp) 302 struct vnode *vp; 303 u_quad_t filesize; 304 daddr_t lbn; 305 daddr_t blkno; 306 long size; 307 int run; 308 struct buf *fbp; 309{ 310 struct buf *bp, *tbp; 311 daddr_t bn; 312 int i, inc, j; 313 314#ifdef DIAGNOSTIC 315 if (size != vp->v_mount->mnt_stat.f_iosize) 316 panic("cluster_rbuild: size %d != filesize %d\n", 317 size, vp->v_mount->mnt_stat.f_iosize); 318#endif 319 /* 320 * avoid a division 321 */ 322 while ((u_quad_t) size * (lbn + run) > filesize) { 323 --run; 324 } 325 326 if (fbp) { 327 tbp = fbp; 328 tbp->b_flags |= B_READ; 329 } else { 330 tbp = getblk(vp, lbn, size, 0, 0); 331 if (tbp->b_flags & B_CACHE) 332 return tbp; 333 tbp->b_flags |= B_ASYNC | B_READ | B_RAM; 334 } 335 336 tbp->b_blkno = blkno; 337 if( (tbp->b_flags & B_MALLOC) || 338 ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) ) 339 return tbp; 340 341 bp = trypbuf(); 342 if (bp == 0) 343 return tbp; 344 345 (vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK; 346 bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO; 347 bp->b_iodone = cluster_callback; 348 bp->b_blkno = blkno; 349 bp->b_lblkno = lbn; 350 bp->b_offset = tbp->b_offset; 351#ifdef DIAGNOSTIC 352 if (bp->b_offset == NOOFFSET) 353 panic("cluster_rbuild: no buffer offset"); 354#endif 355 pbgetvp(vp, bp); 356 357 TAILQ_INIT(&bp->b_cluster.cluster_head); 358 359 bp->b_bcount = 0; 360 bp->b_bufsize = 0; 361 bp->b_npages = 0; 362 363 if (vp->v_maxio == 0) 364 vp->v_maxio = DFLTPHYS; 365 inc = btodb(size); 366 for (bn = blkno, i = 0; i < run; ++i, bn += inc) { 367 if (i != 0) { 368 if ((bp->b_npages * PAGE_SIZE) + 369 round_page(size) > vp->v_maxio) 370 break; 371 372 if (tbp = incore(vp, lbn + i)) { 373 if (tbp->b_flags & B_BUSY) 374 break; 375 376 for (j = 0; j < tbp->b_npages; j++) 377 if (tbp->b_pages[j]->valid) 378 break; 379 380 if (j != tbp->b_npages) 381 break; 382 383 if (tbp->b_bcount != size) 384 break; 385 } 386 387 tbp = getblk(vp, lbn + i, size, 0, 0); 388 389 if ((tbp->b_flags & B_CACHE) || 390 (tbp->b_flags & B_VMIO) == 0) { 391 bqrelse(tbp); 392 break; 393 } 394 395 for (j = 0;j < tbp->b_npages; j++) 396 if (tbp->b_pages[j]->valid) 397 break; 398 399 if (j != tbp->b_npages) { 400 bqrelse(tbp); 401 break; 402 } 403 404 if ((fbp && (i == 1)) || (i == (run - 1))) 405 tbp->b_flags |= B_RAM; 406 tbp->b_flags |= B_READ | B_ASYNC; 407 if (tbp->b_blkno == tbp->b_lblkno) { 408 tbp->b_blkno = bn; 409 } else if (tbp->b_blkno != bn) { 410 brelse(tbp); 411 break; 412 } 413 } 414 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, 415 tbp, b_cluster.cluster_entry); 416 for (j = 0; j < tbp->b_npages; j += 1) { 417 vm_page_t m; 418 m = tbp->b_pages[j]; 419 ++m->busy; 420 ++m->object->paging_in_progress; 421 if ((bp->b_npages == 0) || 422 (bp->b_pages[bp->b_npages-1] != m)) { 423 bp->b_pages[bp->b_npages] = m; 424 bp->b_npages++; 425 } 426 if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) 427 tbp->b_pages[j] = bogus_page; 428 } 429 bp->b_bcount += tbp->b_bcount; 430 bp->b_bufsize += tbp->b_bufsize; 431 } 432 433 for(j=0;j<bp->b_npages;j++) { 434 if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) == 435 VM_PAGE_BITS_ALL) 436 bp->b_pages[j] = bogus_page; 437 } 438 if (bp->b_bufsize > bp->b_kvasize) 439 panic("cluster_rbuild: b_bufsize(%d) > b_kvasize(%d)\n", 440 bp->b_bufsize, bp->b_kvasize); 441 bp->b_kvasize = bp->b_bufsize; 442 443 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), 444 (vm_page_t *)bp->b_pages, bp->b_npages); 445 return (bp); 446} 447 448/* 449 * Cleanup after a clustered read or write. 450 * This is complicated by the fact that any of the buffers might have 451 * extra memory (if there were no empty buffer headers at allocbuf time) 452 * that we will need to shift around. 453 */ 454void 455cluster_callback(bp) 456 struct buf *bp; 457{ 458 struct buf *nbp, *tbp; 459 int error = 0; 460 461 /* 462 * Must propogate errors to all the components. 463 */ 464 if (bp->b_flags & B_ERROR) 465 error = bp->b_error; 466 467 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 468 /* 469 * Move memory from the large cluster buffer into the component 470 * buffers and mark IO as done on these. 471 */ 472 for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head); 473 tbp; tbp = nbp) { 474 nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry); 475 if (error) { 476 tbp->b_flags |= B_ERROR; 477 tbp->b_error = error; 478 } else 479 tbp->b_dirtyoff = tbp->b_dirtyend = 0; 480 biodone(tbp); 481 } 482 relpbuf(bp); 483} 484 485/* 486 * Do clustered write for FFS. 487 * 488 * Three cases: 489 * 1. Write is not sequential (write asynchronously) 490 * Write is sequential: 491 * 2. beginning of cluster - begin cluster 492 * 3. middle of a cluster - add to cluster 493 * 4. end of a cluster - asynchronously write cluster 494 */ 495void 496cluster_write(bp, filesize) 497 struct buf *bp; 498 u_quad_t filesize; 499{ 500 struct vnode *vp; 501 daddr_t lbn; 502 int maxclen, cursize; 503 int lblocksize; 504 int async; 505 506 vp = bp->b_vp; 507 if (vp->v_maxio == 0) 508 vp->v_maxio = DFLTPHYS; 509 if (vp->v_type == VREG) { 510 async = vp->v_mount->mnt_flag & MNT_ASYNC; 511 lblocksize = vp->v_mount->mnt_stat.f_iosize; 512 } else { 513 async = 0; 514 lblocksize = bp->b_bufsize; 515 } 516 lbn = bp->b_lblkno; 517 518#ifdef DIAGNOSTIC 519 if (bp->b_offset == NOOFFSET) 520 panic("cluster_write: no buffer offset"); 521#endif 522 523 /* Initialize vnode to beginning of file. */ 524 if (lbn == 0) 525 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 526 527 if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 || 528 (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) { 529 maxclen = vp->v_maxio / lblocksize - 1; 530 if (vp->v_clen != 0) { 531 /* 532 * Next block is not sequential. 533 * 534 * If we are not writing at end of file, the process 535 * seeked to another point in the file since its last 536 * write, or we have reached our maximum cluster size, 537 * then push the previous cluster. Otherwise try 538 * reallocating to make it sequential. 539 */ 540 cursize = vp->v_lastw - vp->v_cstart + 1; 541#ifndef notyet_block_reallocation_enabled 542 if (((u_quad_t) bp->b_offset + lblocksize) != filesize || 543 lbn != vp->v_lastw + 1 || 544 vp->v_clen <= cursize) { 545 if (!async) 546 cluster_wbuild(vp, lblocksize, 547 vp->v_cstart, cursize); 548 } 549#else 550 if ((lbn + 1) * lblocksize != filesize || 551 lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) { 552 if (!async) 553 cluster_wbuild(vp, lblocksize, 554 vp->v_cstart, cursize); 555 } else { 556 struct buf **bpp, **endbp; 557 struct cluster_save *buflist; 558 559 buflist = cluster_collectbufs(vp, bp); 560 endbp = &buflist->bs_children 561 [buflist->bs_nchildren - 1]; 562 if (VOP_REALLOCBLKS(vp, buflist)) { 563 /* 564 * Failed, push the previous cluster. 565 */ 566 for (bpp = buflist->bs_children; 567 bpp < endbp; bpp++) 568 brelse(*bpp); 569 free(buflist, M_SEGMENT); 570 cluster_wbuild(vp, lblocksize, 571 vp->v_cstart, cursize); 572 } else { 573 /* 574 * Succeeded, keep building cluster. 575 */ 576 for (bpp = buflist->bs_children; 577 bpp <= endbp; bpp++) 578 bdwrite(*bpp); 579 free(buflist, M_SEGMENT); 580 vp->v_lastw = lbn; 581 vp->v_lasta = bp->b_blkno; 582 return; 583 } 584 } 585#endif /* notyet_block_reallocation_enabled */ 586 } 587 /* 588 * Consider beginning a cluster. If at end of file, make 589 * cluster as large as possible, otherwise find size of 590 * existing cluster. 591 */ 592 if ((vp->v_type == VREG) && 593 ((u_quad_t) bp->b_offset + lblocksize) != filesize && 594 (bp->b_blkno == bp->b_lblkno) && 595 (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) || 596 bp->b_blkno == -1)) { 597 bawrite(bp); 598 vp->v_clen = 0; 599 vp->v_lasta = bp->b_blkno; 600 vp->v_cstart = lbn + 1; 601 vp->v_lastw = lbn; 602 return; 603 } 604 vp->v_clen = maxclen; 605 if (!async && maxclen == 0) { /* I/O not contiguous */ 606 vp->v_cstart = lbn + 1; 607 bawrite(bp); 608 } else { /* Wait for rest of cluster */ 609 vp->v_cstart = lbn; 610 bdwrite(bp); 611 } 612 } else if (lbn == vp->v_cstart + vp->v_clen) { 613 /* 614 * At end of cluster, write it out. 615 */ 616 bdwrite(bp); 617 cluster_wbuild(vp, lblocksize, vp->v_cstart, vp->v_clen + 1); 618 vp->v_clen = 0; 619 vp->v_cstart = lbn + 1; 620 } else 621 /* 622 * In the middle of a cluster, so just delay the I/O for now. 623 */ 624 bdwrite(bp); 625 vp->v_lastw = lbn; 626 vp->v_lasta = bp->b_blkno; 627} 628 629 630/* 631 * This is an awful lot like cluster_rbuild...wish they could be combined. 632 * The last lbn argument is the current block on which I/O is being 633 * performed. Check to see that it doesn't fall in the middle of 634 * the current block (if last_bp == NULL). 635 */ 636int 637cluster_wbuild(vp, size, start_lbn, len) 638 struct vnode *vp; 639 long size; 640 daddr_t start_lbn; 641 int len; 642{ 643 struct buf *bp, *tbp; 644 int i, j, s; 645 int totalwritten = 0; 646 int dbsize = btodb(size); 647 648 if (vp->v_maxio == 0) 649 vp->v_maxio = DFLTPHYS; 650 while (len > 0) { 651 s = splbio(); 652 if (((tbp = gbincore(vp, start_lbn)) == NULL) || 653 ((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) { 654 ++start_lbn; 655 --len; 656 splx(s); 657 continue; 658 } 659 bremfree(tbp); 660 tbp->b_flags |= B_BUSY; 661 tbp->b_flags &= ~B_DONE; 662 splx(s); 663 664 /* 665 * Extra memory in the buffer, punt on this buffer. XXX we could 666 * handle this in most cases, but we would have to push the extra 667 * memory down to after our max possible cluster size and then 668 * potentially pull it back up if the cluster was terminated 669 * prematurely--too much hassle. 670 */ 671 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) || 672 (tbp->b_bcount != tbp->b_bufsize) || 673 (tbp->b_bcount != size) || 674 (len == 1) || 675 ((bp = trypbuf()) == NULL)) { 676 totalwritten += tbp->b_bufsize; 677 bawrite(tbp); 678 ++start_lbn; 679 --len; 680 continue; 681 } 682 683 /* 684 * We got a pbuf to make the cluster in. 685 * so initialise it. 686 */ 687 TAILQ_INIT(&bp->b_cluster.cluster_head); 688 bp->b_bcount = 0; 689 bp->b_bufsize = 0; 690 bp->b_npages = 0; 691 if (tbp->b_wcred != NOCRED) { 692 bp->b_wcred = tbp->b_wcred; 693 crhold(bp->b_wcred); 694 } 695 696 bp->b_blkno = tbp->b_blkno; 697 bp->b_lblkno = tbp->b_lblkno; 698 bp->b_offset = tbp->b_offset; 699 (vm_offset_t) bp->b_data |= 700 ((vm_offset_t) tbp->b_data) & PAGE_MASK; 701 bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER | 702 (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT)); 703 bp->b_iodone = cluster_callback; 704 pbgetvp(vp, bp); 705 /* 706 * From this location in the file, scan forward to see 707 * if there are buffers with adjacent data that need to 708 * be written as well. 709 */ 710 for (i = 0; i < len; ++i, ++start_lbn) { 711 if (i != 0) { /* If not the first buffer */ 712 s = splbio(); 713 /* 714 * If the adjacent data is not even in core it 715 * can't need to be written. 716 */ 717 if ((tbp = gbincore(vp, start_lbn)) == NULL) { 718 splx(s); 719 break; 720 } 721 722 /* 723 * If it IS in core, but has different 724 * characteristics, don't cluster with it. 725 */ 726 if ((tbp->b_flags & 727 (B_VMIO | B_CLUSTEROK | B_INVAL | B_BUSY | 728 B_DELWRI | B_NEEDCOMMIT)) 729 != (B_DELWRI | B_CLUSTEROK | 730 (bp->b_flags & (B_VMIO | B_NEEDCOMMIT)))) { 731 splx(s); 732 break; 733 } 734 735 if (tbp->b_wcred != bp->b_wcred) { 736 splx(s); 737 break; 738 } 739 740 /* 741 * Check that the combined cluster 742 * would make sense with regard to pages 743 * and would not be too large 744 */ 745 if ((tbp->b_bcount != size) || 746 ((bp->b_blkno + (dbsize * i)) != 747 tbp->b_blkno) || 748 ((tbp->b_npages + bp->b_npages) > 749 (vp->v_maxio / PAGE_SIZE))) { 750 splx(s); 751 break; 752 } 753 /* 754 * Ok, it's passed all the tests, 755 * so remove it from the free list 756 * and mark it busy. We will use it. 757 */ 758 bremfree(tbp); 759 tbp->b_flags |= B_BUSY; 760 tbp->b_flags &= ~B_DONE; 761 splx(s); 762 } /* end of code for non-first buffers only */ 763 /* check for latent dependencies to be handled */ 764 if ((LIST_FIRST(&tbp->b_dep)) != NULL && 765 bioops.io_start) 766 (*bioops.io_start)(tbp); 767 /* 768 * If the IO is via the VM then we do some 769 * special VM hackery. (yuck) 770 */ 771 if (tbp->b_flags & B_VMIO) { 772 vm_page_t m; 773 774 if (i != 0) { /* if not first buffer */ 775 for (j = 0; j < tbp->b_npages; j += 1) { 776 m = tbp->b_pages[j]; 777 if (m->flags & PG_BUSY) 778 goto finishcluster; 779 } 780 } 781 782 for (j = 0; j < tbp->b_npages; j += 1) { 783 m = tbp->b_pages[j]; 784 ++m->busy; 785 ++m->object->paging_in_progress; 786 if ((bp->b_npages == 0) || 787 (bp->b_pages[bp->b_npages - 1] != m)) { 788 bp->b_pages[bp->b_npages] = m; 789 bp->b_npages++; 790 } 791 } 792 } 793 bp->b_bcount += size; 794 bp->b_bufsize += size; 795 796 --numdirtybuffers; 797 tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI); 798 tbp->b_flags |= B_ASYNC; 799 reassignbuf(tbp, tbp->b_vp); /* put on clean list */ 800 ++tbp->b_vp->v_numoutput; 801 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, 802 tbp, b_cluster.cluster_entry); 803 } 804 finishcluster: 805 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), 806 (vm_page_t *) bp->b_pages, bp->b_npages); 807 if (bp->b_bufsize > bp->b_kvasize) 808 panic("cluster_wbuild: b_bufsize(%d) > b_kvasize(%d)\n", 809 bp->b_bufsize, bp->b_kvasize); 810 bp->b_kvasize = bp->b_bufsize; 811 totalwritten += bp->b_bufsize; 812 bp->b_dirtyoff = 0; 813 bp->b_dirtyend = bp->b_bufsize; 814 bawrite(bp); 815 816 len -= i; 817 } 818 return totalwritten; 819} 820 821#ifdef notyet_block_reallocation_enabled 822/* 823 * Collect together all the buffers in a cluster. 824 * Plus add one additional buffer. 825 */ 826static struct cluster_save * 827cluster_collectbufs(vp, last_bp) 828 struct vnode *vp; 829 struct buf *last_bp; 830{ 831 struct cluster_save *buflist; 832 daddr_t lbn; 833 int i, len; 834 835 len = vp->v_lastw - vp->v_cstart + 1; 836 buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist), 837 M_SEGMENT, M_WAITOK); 838 buflist->bs_nchildren = 0; 839 buflist->bs_children = (struct buf **) (buflist + 1); 840 for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) 841 (void) bread(vp, lbn, last_bp->b_bcount, NOCRED, 842 &buflist->bs_children[i]); 843 buflist->bs_children[i] = last_bp; 844 buflist->bs_nchildren = i + 1; 845 return (buflist); 846} 847#endif /* notyet_block_reallocation_enabled */ 848