vfs_cluster.c revision 50701
1/*- 2 * Copyright (c) 1993 3 * The Regents of the University of California. All rights reserved. 4 * Modifications/enhancements: 5 * Copyright (c) 1995 John S. Dyson. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by the University of 18 * California, Berkeley and its contributors. 19 * 4. Neither the name of the University nor the names of its contributors 20 * may be used to endorse or promote products derived from this software 21 * without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 33 * SUCH DAMAGE. 34 * 35 * @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94 36 * $FreeBSD: head/sys/kern/vfs_cluster.c 50701 1999-08-31 14:18:32Z tegge $ 37 */ 38 39#include "opt_debug_cluster.h" 40 41#include <sys/param.h> 42#include <sys/systm.h> 43#include <sys/kernel.h> 44#include <sys/proc.h> 45#include <sys/buf.h> 46#include <sys/vnode.h> 47#include <sys/malloc.h> 48#include <sys/mount.h> 49#include <sys/resourcevar.h> 50#include <vm/vm.h> 51#include <vm/vm_prot.h> 52#include <vm/vm_object.h> 53#include <vm/vm_page.h> 54#include <sys/sysctl.h> 55 56#if defined(CLUSTERDEBUG) 57#include <sys/sysctl.h> 58static int rcluster= 0; 59SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0, ""); 60#endif 61 62static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer"); 63 64static struct cluster_save * 65 cluster_collectbufs __P((struct vnode *vp, struct buf *last_bp)); 66static struct buf * 67 cluster_rbuild __P((struct vnode *vp, u_quad_t filesize, daddr_t lbn, 68 daddr_t blkno, long size, int run, struct buf *fbp)); 69 70static int write_behind = 1; 71SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0, ""); 72 73extern vm_page_t bogus_page; 74 75extern int cluster_pbuf_freecnt; 76 77/* 78 * Maximum number of blocks for read-ahead. 79 */ 80#define MAXRA 32 81 82/* 83 * This replaces bread. 84 */ 85int 86cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp) 87 struct vnode *vp; 88 u_quad_t filesize; 89 daddr_t lblkno; 90 long size; 91 struct ucred *cred; 92 long totread; 93 int seqcount; 94 struct buf **bpp; 95{ 96 struct buf *bp, *rbp, *reqbp; 97 daddr_t blkno, origblkno; 98 int error, num_ra; 99 int i; 100 int maxra, racluster; 101 long origtotread; 102 103 error = 0; 104 if (vp->v_maxio == 0) 105 vp->v_maxio = DFLTPHYS; 106 107 /* 108 * Try to limit the amount of read-ahead by a few 109 * ad-hoc parameters. This needs work!!! 110 */ 111 racluster = vp->v_maxio/size; 112 maxra = 2 * racluster + (totread / size); 113 if (maxra > MAXRA) 114 maxra = MAXRA; 115 if (maxra > nbuf/8) 116 maxra = nbuf/8; 117 118 /* 119 * get the requested block 120 */ 121 *bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0); 122 origblkno = lblkno; 123 origtotread = totread; 124 125 /* 126 * if it is in the cache, then check to see if the reads have been 127 * sequential. If they have, then try some read-ahead, otherwise 128 * back-off on prospective read-aheads. 129 */ 130 if (bp->b_flags & B_CACHE) { 131 if (!seqcount) { 132 return 0; 133 } else if ((bp->b_flags & B_RAM) == 0) { 134 return 0; 135 } else { 136 int s; 137 struct buf *tbp; 138 bp->b_flags &= ~B_RAM; 139 /* 140 * We do the spl here so that there is no window 141 * between the incore and the b_usecount increment 142 * below. We opt to keep the spl out of the loop 143 * for efficiency. 144 */ 145 s = splbio(); 146 for (i = 1; i < maxra; i++) { 147 148 if (!(tbp = incore(vp, lblkno+i))) { 149 break; 150 } 151 152 /* 153 * Set another read-ahead mark so we know 154 * to check again. 155 */ 156 if (((i % racluster) == (racluster - 1)) || 157 (i == (maxra - 1))) 158 tbp->b_flags |= B_RAM; 159 } 160 splx(s); 161 if (i >= maxra) { 162 return 0; 163 } 164 lblkno += i; 165 } 166 reqbp = bp = NULL; 167 } else { 168 off_t firstread = bp->b_offset; 169 170 KASSERT(bp->b_offset != NOOFFSET, 171 ("cluster_read: no buffer offset")); 172 if (firstread + totread > filesize) 173 totread = filesize - firstread; 174 if (totread > size) { 175 int nblks = 0; 176 int ncontigafter; 177 while (totread > 0) { 178 nblks++; 179 totread -= size; 180 } 181 if (nblks == 1) 182 goto single_block_read; 183 if (nblks > racluster) 184 nblks = racluster; 185 186 error = VOP_BMAP(vp, lblkno, NULL, 187 &blkno, &ncontigafter, NULL); 188 if (error) 189 goto single_block_read; 190 if (blkno == -1) 191 goto single_block_read; 192 if (ncontigafter == 0) 193 goto single_block_read; 194 if (ncontigafter + 1 < nblks) 195 nblks = ncontigafter + 1; 196 197 bp = cluster_rbuild(vp, filesize, lblkno, 198 blkno, size, nblks, bp); 199 lblkno += (bp->b_bufsize / size); 200 } else { 201single_block_read: 202 /* 203 * if it isn't in the cache, then get a chunk from 204 * disk if sequential, otherwise just get the block. 205 */ 206 bp->b_flags |= B_READ | B_RAM; 207 lblkno += 1; 208 } 209 } 210 211 /* 212 * if we have been doing sequential I/O, then do some read-ahead 213 */ 214 rbp = NULL; 215 if (seqcount && (lblkno < (origblkno + seqcount))) { 216 /* 217 * we now build the read-ahead buffer if it is desirable. 218 */ 219 if (((u_quad_t)(lblkno + 1) * size) <= filesize && 220 !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) && 221 blkno != -1) { 222 int nblksread; 223 int ntoread = num_ra + 1; 224 nblksread = (origtotread + size - 1) / size; 225 if (seqcount < nblksread) 226 seqcount = nblksread; 227 if (seqcount < ntoread) 228 ntoread = seqcount; 229 if (num_ra) { 230 rbp = cluster_rbuild(vp, filesize, lblkno, 231 blkno, size, ntoread, NULL); 232 } else { 233 rbp = getblk(vp, lblkno, size, 0, 0); 234 rbp->b_flags |= B_READ | B_ASYNC | B_RAM; 235 rbp->b_blkno = blkno; 236 } 237 } 238 } 239 240 /* 241 * handle the synchronous read 242 */ 243 if (bp) { 244#if defined(CLUSTERDEBUG) 245 if (rcluster) 246 printf("S(%ld,%ld,%d) ", 247 (long)bp->b_lblkno, bp->b_bcount, seqcount); 248#endif 249 if ((bp->b_flags & B_CLUSTER) == 0) 250 vfs_busy_pages(bp, 0); 251 bp->b_flags &= ~(B_ERROR|B_INVAL); 252 if (bp->b_flags & (B_ASYNC|B_CALL)) 253 BUF_KERNPROC(bp); 254 error = VOP_STRATEGY(vp, bp); 255 curproc->p_stats->p_ru.ru_inblock++; 256 } 257 258 /* 259 * and if we have read-aheads, do them too 260 */ 261 if (rbp) { 262 if (error) { 263 rbp->b_flags &= ~(B_ASYNC | B_READ); 264 brelse(rbp); 265 } else if (rbp->b_flags & B_CACHE) { 266 rbp->b_flags &= ~(B_ASYNC | B_READ); 267 bqrelse(rbp); 268 } else { 269#if defined(CLUSTERDEBUG) 270 if (rcluster) { 271 if (bp) 272 printf("A+(%ld,%ld,%ld,%d) ", 273 (long)rbp->b_lblkno, rbp->b_bcount, 274 (long)(rbp->b_lblkno - origblkno), 275 seqcount); 276 else 277 printf("A(%ld,%ld,%ld,%d) ", 278 (long)rbp->b_lblkno, rbp->b_bcount, 279 (long)(rbp->b_lblkno - origblkno), 280 seqcount); 281 } 282#endif 283 284 if ((rbp->b_flags & B_CLUSTER) == 0) 285 vfs_busy_pages(rbp, 0); 286 rbp->b_flags &= ~(B_ERROR|B_INVAL); 287 if (rbp->b_flags & (B_ASYNC|B_CALL)) 288 BUF_KERNPROC(rbp); 289 (void) VOP_STRATEGY(vp, rbp); 290 curproc->p_stats->p_ru.ru_inblock++; 291 } 292 } 293 if (reqbp) 294 return (biowait(reqbp)); 295 else 296 return (error); 297} 298 299/* 300 * If blocks are contiguous on disk, use this to provide clustered 301 * read ahead. We will read as many blocks as possible sequentially 302 * and then parcel them up into logical blocks in the buffer hash table. 303 */ 304static struct buf * 305cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp) 306 struct vnode *vp; 307 u_quad_t filesize; 308 daddr_t lbn; 309 daddr_t blkno; 310 long size; 311 int run; 312 struct buf *fbp; 313{ 314 struct buf *bp, *tbp; 315 daddr_t bn; 316 int i, inc, j; 317 318 KASSERT(size == vp->v_mount->mnt_stat.f_iosize, 319 ("cluster_rbuild: size %ld != filesize %ld\n", 320 size, vp->v_mount->mnt_stat.f_iosize)); 321 322 /* 323 * avoid a division 324 */ 325 while ((u_quad_t) size * (lbn + run) > filesize) { 326 --run; 327 } 328 329 if (fbp) { 330 tbp = fbp; 331 tbp->b_flags |= B_READ; 332 } else { 333 tbp = getblk(vp, lbn, size, 0, 0); 334 if (tbp->b_flags & B_CACHE) 335 return tbp; 336 tbp->b_flags |= B_ASYNC | B_READ | B_RAM; 337 } 338 339 tbp->b_blkno = blkno; 340 if( (tbp->b_flags & B_MALLOC) || 341 ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) ) 342 return tbp; 343 344 bp = trypbuf(&cluster_pbuf_freecnt); 345 if (bp == 0) 346 return tbp; 347 348 bp->b_data = (char *)((vm_offset_t)bp->b_data | 349 ((vm_offset_t)tbp->b_data & PAGE_MASK)); 350 bp->b_flags = B_ASYNC | B_READ | B_CALL | B_CLUSTER | B_VMIO; 351 bp->b_iodone = cluster_callback; 352 bp->b_blkno = blkno; 353 bp->b_lblkno = lbn; 354 bp->b_offset = tbp->b_offset; 355 KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset")); 356 pbgetvp(vp, bp); 357 358 TAILQ_INIT(&bp->b_cluster.cluster_head); 359 360 bp->b_bcount = 0; 361 bp->b_bufsize = 0; 362 bp->b_npages = 0; 363 364 if (vp->v_maxio == 0) 365 vp->v_maxio = DFLTPHYS; 366 inc = btodb(size); 367 for (bn = blkno, i = 0; i < run; ++i, bn += inc) { 368 if (i != 0) { 369 if ((bp->b_npages * PAGE_SIZE) + 370 round_page(size) > vp->v_maxio) 371 break; 372 373 if ((tbp = incore(vp, lbn + i)) != NULL) { 374 if (BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) 375 break; 376 BUF_UNLOCK(tbp); 377 378 for (j = 0; j < tbp->b_npages; j++) 379 if (tbp->b_pages[j]->valid) 380 break; 381 382 if (j != tbp->b_npages) 383 break; 384 385 if (tbp->b_bcount != size) 386 break; 387 } 388 389 tbp = getblk(vp, lbn + i, size, 0, 0); 390 391 if ((tbp->b_flags & B_CACHE) || 392 (tbp->b_flags & B_VMIO) == 0) { 393 bqrelse(tbp); 394 break; 395 } 396 397 for (j = 0;j < tbp->b_npages; j++) 398 if (tbp->b_pages[j]->valid) 399 break; 400 401 if (j != tbp->b_npages) { 402 bqrelse(tbp); 403 break; 404 } 405 406 if ((fbp && (i == 1)) || (i == (run - 1))) 407 tbp->b_flags |= B_RAM; 408 tbp->b_flags |= B_READ | B_ASYNC; 409 if (tbp->b_blkno == tbp->b_lblkno) { 410 tbp->b_blkno = bn; 411 } else if (tbp->b_blkno != bn) { 412 brelse(tbp); 413 break; 414 } 415 } 416 /* 417 * XXX fbp from caller may not be B_ASYNC, but we are going 418 * to biodone() it in cluster_callback() anyway 419 */ 420 BUF_KERNPROC(tbp); 421 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, 422 tbp, b_cluster.cluster_entry); 423 for (j = 0; j < tbp->b_npages; j += 1) { 424 vm_page_t m; 425 m = tbp->b_pages[j]; 426 vm_page_io_start(m); 427 vm_object_pip_add(m->object, 1); 428 if ((bp->b_npages == 0) || 429 (bp->b_pages[bp->b_npages-1] != m)) { 430 bp->b_pages[bp->b_npages] = m; 431 bp->b_npages++; 432 } 433 if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) 434 tbp->b_pages[j] = bogus_page; 435 } 436 bp->b_bcount += tbp->b_bcount; 437 bp->b_bufsize += tbp->b_bufsize; 438 } 439 440 for(j=0;j<bp->b_npages;j++) { 441 if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) == 442 VM_PAGE_BITS_ALL) 443 bp->b_pages[j] = bogus_page; 444 } 445 if (bp->b_bufsize > bp->b_kvasize) 446 panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n", 447 bp->b_bufsize, bp->b_kvasize); 448 bp->b_kvasize = bp->b_bufsize; 449 450 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), 451 (vm_page_t *)bp->b_pages, bp->b_npages); 452 return (bp); 453} 454 455/* 456 * Cleanup after a clustered read or write. 457 * This is complicated by the fact that any of the buffers might have 458 * extra memory (if there were no empty buffer headers at allocbuf time) 459 * that we will need to shift around. 460 */ 461void 462cluster_callback(bp) 463 struct buf *bp; 464{ 465 struct buf *nbp, *tbp; 466 int error = 0; 467 468 /* 469 * Must propogate errors to all the components. 470 */ 471 if (bp->b_flags & B_ERROR) 472 error = bp->b_error; 473 474 pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages); 475 /* 476 * Move memory from the large cluster buffer into the component 477 * buffers and mark IO as done on these. 478 */ 479 for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head); 480 tbp; tbp = nbp) { 481 nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry); 482 if (error) { 483 tbp->b_flags |= B_ERROR; 484 tbp->b_error = error; 485 } else { 486 tbp->b_dirtyoff = tbp->b_dirtyend = 0; 487 tbp->b_flags &= ~(B_ERROR|B_INVAL); 488 } 489 biodone(tbp); 490 } 491 relpbuf(bp, &cluster_pbuf_freecnt); 492} 493 494/* 495 * cluster_wbuild_wb: 496 * 497 * Implement modified write build for cluster. 498 * 499 * write_behind = 0 write behind disabled 500 * write_behind = 1 write behind normal (default) 501 * write_behind = 2 write behind backed-off 502 */ 503 504static __inline int 505cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len) 506{ 507 int r = 0; 508 509 switch(write_behind) { 510 case 2: 511 if (start_lbn < len) 512 break; 513 start_lbn -= len; 514 /* fall through */ 515 case 1: 516 r = cluster_wbuild(vp, size, start_lbn, len); 517 /* fall through */ 518 default: 519 /* fall through */ 520 break; 521 } 522 return(r); 523} 524 525/* 526 * Do clustered write for FFS. 527 * 528 * Three cases: 529 * 1. Write is not sequential (write asynchronously) 530 * Write is sequential: 531 * 2. beginning of cluster - begin cluster 532 * 3. middle of a cluster - add to cluster 533 * 4. end of a cluster - asynchronously write cluster 534 */ 535void 536cluster_write(bp, filesize) 537 struct buf *bp; 538 u_quad_t filesize; 539{ 540 struct vnode *vp; 541 daddr_t lbn; 542 int maxclen, cursize; 543 int lblocksize; 544 int async; 545 546 vp = bp->b_vp; 547 if (vp->v_maxio == 0) 548 vp->v_maxio = DFLTPHYS; 549 if (vp->v_type == VREG) { 550 async = vp->v_mount->mnt_flag & MNT_ASYNC; 551 lblocksize = vp->v_mount->mnt_stat.f_iosize; 552 } else { 553 async = 0; 554 lblocksize = bp->b_bufsize; 555 } 556 lbn = bp->b_lblkno; 557 KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset")); 558 559 /* Initialize vnode to beginning of file. */ 560 if (lbn == 0) 561 vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; 562 563 if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 || 564 (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) { 565 maxclen = vp->v_maxio / lblocksize - 1; 566 if (vp->v_clen != 0) { 567 /* 568 * Next block is not sequential. 569 * 570 * If we are not writing at end of file, the process 571 * seeked to another point in the file since its last 572 * write, or we have reached our maximum cluster size, 573 * then push the previous cluster. Otherwise try 574 * reallocating to make it sequential. 575 */ 576 cursize = vp->v_lastw - vp->v_cstart + 1; 577 if (((u_quad_t) bp->b_offset + lblocksize) != filesize || 578 lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) { 579 if (!async) 580 cluster_wbuild_wb(vp, lblocksize, 581 vp->v_cstart, cursize); 582 } else { 583 struct buf **bpp, **endbp; 584 struct cluster_save *buflist; 585 586 buflist = cluster_collectbufs(vp, bp); 587 endbp = &buflist->bs_children 588 [buflist->bs_nchildren - 1]; 589 if (VOP_REALLOCBLKS(vp, buflist)) { 590 /* 591 * Failed, push the previous cluster. 592 */ 593 for (bpp = buflist->bs_children; 594 bpp < endbp; bpp++) 595 brelse(*bpp); 596 free(buflist, M_SEGMENT); 597 cluster_wbuild_wb(vp, lblocksize, 598 vp->v_cstart, cursize); 599 } else { 600 /* 601 * Succeeded, keep building cluster. 602 */ 603 for (bpp = buflist->bs_children; 604 bpp <= endbp; bpp++) 605 bdwrite(*bpp); 606 free(buflist, M_SEGMENT); 607 vp->v_lastw = lbn; 608 vp->v_lasta = bp->b_blkno; 609 return; 610 } 611 } 612 } 613 /* 614 * Consider beginning a cluster. If at end of file, make 615 * cluster as large as possible, otherwise find size of 616 * existing cluster. 617 */ 618 if ((vp->v_type == VREG) && 619 ((u_quad_t) bp->b_offset + lblocksize) != filesize && 620 (bp->b_blkno == bp->b_lblkno) && 621 (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) || 622 bp->b_blkno == -1)) { 623 bawrite(bp); 624 vp->v_clen = 0; 625 vp->v_lasta = bp->b_blkno; 626 vp->v_cstart = lbn + 1; 627 vp->v_lastw = lbn; 628 return; 629 } 630 vp->v_clen = maxclen; 631 if (!async && maxclen == 0) { /* I/O not contiguous */ 632 vp->v_cstart = lbn + 1; 633 bawrite(bp); 634 } else { /* Wait for rest of cluster */ 635 vp->v_cstart = lbn; 636 bdwrite(bp); 637 } 638 } else if (lbn == vp->v_cstart + vp->v_clen) { 639 /* 640 * At end of cluster, write it out. 641 */ 642 bdwrite(bp); 643 cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1); 644 vp->v_clen = 0; 645 vp->v_cstart = lbn + 1; 646 } else 647 /* 648 * In the middle of a cluster, so just delay the I/O for now. 649 */ 650 bdwrite(bp); 651 vp->v_lastw = lbn; 652 vp->v_lasta = bp->b_blkno; 653} 654 655 656/* 657 * This is an awful lot like cluster_rbuild...wish they could be combined. 658 * The last lbn argument is the current block on which I/O is being 659 * performed. Check to see that it doesn't fall in the middle of 660 * the current block (if last_bp == NULL). 661 */ 662int 663cluster_wbuild(vp, size, start_lbn, len) 664 struct vnode *vp; 665 long size; 666 daddr_t start_lbn; 667 int len; 668{ 669 struct buf *bp, *tbp; 670 int i, j, s; 671 int totalwritten = 0; 672 int dbsize = btodb(size); 673 674 if (vp->v_maxio == 0) 675 vp->v_maxio = DFLTPHYS; 676 while (len > 0) { 677 s = splbio(); 678 if (((tbp = gbincore(vp, start_lbn)) == NULL) || 679 ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) || 680 BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) { 681 ++start_lbn; 682 --len; 683 splx(s); 684 continue; 685 } 686 bremfree(tbp); 687 tbp->b_flags &= ~B_DONE; 688 splx(s); 689 690 /* 691 * Extra memory in the buffer, punt on this buffer. 692 * XXX we could handle this in most cases, but we would 693 * have to push the extra memory down to after our max 694 * possible cluster size and then potentially pull it back 695 * up if the cluster was terminated prematurely--too much 696 * hassle. 697 */ 698 if (((tbp->b_flags & (B_CLUSTEROK|B_MALLOC)) != B_CLUSTEROK) || 699 (tbp->b_bcount != tbp->b_bufsize) || 700 (tbp->b_bcount != size) || 701 (len == 1) || 702 ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) { 703 totalwritten += tbp->b_bufsize; 704 bawrite(tbp); 705 ++start_lbn; 706 --len; 707 continue; 708 } 709 710 /* 711 * We got a pbuf to make the cluster in. 712 * so initialise it. 713 */ 714 TAILQ_INIT(&bp->b_cluster.cluster_head); 715 bp->b_bcount = 0; 716 bp->b_bufsize = 0; 717 bp->b_npages = 0; 718 if (tbp->b_wcred != NOCRED) { 719 bp->b_wcred = tbp->b_wcred; 720 crhold(bp->b_wcred); 721 } 722 723 bp->b_blkno = tbp->b_blkno; 724 bp->b_lblkno = tbp->b_lblkno; 725 bp->b_offset = tbp->b_offset; 726 bp->b_data = (char *)((vm_offset_t)bp->b_data | 727 ((vm_offset_t)tbp->b_data & PAGE_MASK)); 728 bp->b_flags |= B_CALL | B_CLUSTER | 729 (tbp->b_flags & (B_VMIO | B_NEEDCOMMIT)); 730 bp->b_iodone = cluster_callback; 731 pbgetvp(vp, bp); 732 /* 733 * From this location in the file, scan forward to see 734 * if there are buffers with adjacent data that need to 735 * be written as well. 736 */ 737 for (i = 0; i < len; ++i, ++start_lbn) { 738 if (i != 0) { /* If not the first buffer */ 739 s = splbio(); 740 /* 741 * If the adjacent data is not even in core it 742 * can't need to be written. 743 */ 744 if ((tbp = gbincore(vp, start_lbn)) == NULL) { 745 splx(s); 746 break; 747 } 748 749 /* 750 * If it IS in core, but has different 751 * characteristics, don't cluster with it. 752 */ 753 if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK | 754 B_INVAL | B_DELWRI | B_NEEDCOMMIT)) 755 != (B_DELWRI | B_CLUSTEROK | 756 (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) || 757 tbp->b_wcred != bp->b_wcred || 758 BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT)) { 759 splx(s); 760 break; 761 } 762 763 /* 764 * Check that the combined cluster 765 * would make sense with regard to pages 766 * and would not be too large 767 */ 768 if ((tbp->b_bcount != size) || 769 ((bp->b_blkno + (dbsize * i)) != 770 tbp->b_blkno) || 771 ((tbp->b_npages + bp->b_npages) > 772 (vp->v_maxio / PAGE_SIZE))) { 773 BUF_UNLOCK(tbp); 774 splx(s); 775 break; 776 } 777 /* 778 * Ok, it's passed all the tests, 779 * so remove it from the free list 780 * and mark it busy. We will use it. 781 */ 782 bremfree(tbp); 783 tbp->b_flags &= ~B_DONE; 784 splx(s); 785 } /* end of code for non-first buffers only */ 786 /* check for latent dependencies to be handled */ 787 if ((LIST_FIRST(&tbp->b_dep)) != NULL && 788 bioops.io_start) 789 (*bioops.io_start)(tbp); 790 /* 791 * If the IO is via the VM then we do some 792 * special VM hackery. (yuck) 793 */ 794 if (tbp->b_flags & B_VMIO) { 795 vm_page_t m; 796 797 if (i != 0) { /* if not first buffer */ 798 for (j = 0; j < tbp->b_npages; j += 1) { 799 m = tbp->b_pages[j]; 800 if (m->flags & PG_BUSY) { 801 bqrelse(tbp); 802 goto finishcluster; 803 } 804 } 805 } 806 807 for (j = 0; j < tbp->b_npages; j += 1) { 808 m = tbp->b_pages[j]; 809 vm_page_io_start(m); 810 vm_object_pip_add(m->object, 1); 811 if ((bp->b_npages == 0) || 812 (bp->b_pages[bp->b_npages - 1] != m)) { 813 bp->b_pages[bp->b_npages] = m; 814 bp->b_npages++; 815 } 816 } 817 } 818 bp->b_bcount += size; 819 bp->b_bufsize += size; 820 821 s = splbio(); 822 bundirty(tbp); 823 tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR); 824 tbp->b_flags |= B_ASYNC; 825 reassignbuf(tbp, tbp->b_vp); /* put on clean list */ 826 ++tbp->b_vp->v_numoutput; 827 splx(s); 828 BUF_KERNPROC(tbp); 829 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head, 830 tbp, b_cluster.cluster_entry); 831 } 832 finishcluster: 833 pmap_qenter(trunc_page((vm_offset_t) bp->b_data), 834 (vm_page_t *) bp->b_pages, bp->b_npages); 835 if (bp->b_bufsize > bp->b_kvasize) 836 panic( 837 "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n", 838 bp->b_bufsize, bp->b_kvasize); 839 bp->b_kvasize = bp->b_bufsize; 840 totalwritten += bp->b_bufsize; 841 bp->b_dirtyoff = 0; 842 bp->b_dirtyend = bp->b_bufsize; 843 bawrite(bp); 844 845 len -= i; 846 } 847 return totalwritten; 848} 849 850/* 851 * Collect together all the buffers in a cluster. 852 * Plus add one additional buffer. 853 */ 854static struct cluster_save * 855cluster_collectbufs(vp, last_bp) 856 struct vnode *vp; 857 struct buf *last_bp; 858{ 859 struct cluster_save *buflist; 860 struct buf *bp; 861 daddr_t lbn; 862 int i, len; 863 864 len = vp->v_lastw - vp->v_cstart + 1; 865 buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist), 866 M_SEGMENT, M_WAITOK); 867 buflist->bs_nchildren = 0; 868 buflist->bs_children = (struct buf **) (buflist + 1); 869 for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) { 870 (void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp); 871 buflist->bs_children[i] = bp; 872 if (bp->b_blkno == bp->b_lblkno) 873 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, 874 NULL, NULL); 875 } 876 buflist->bs_children[i] = bp = last_bp; 877 if (bp->b_blkno == bp->b_lblkno) 878 VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno, 879 NULL, NULL); 880 buflist->bs_nchildren = i + 1; 881 return (buflist); 882} 883