/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 *	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.13 1995/03/16 18:12:48 bde Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_pageout.h>

#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int doreallocblks = 0;
struct ctldebug debug13 = {"doreallocblks", &doreallocblks};

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
struct buf *cluster_rbuild __P((struct vnode *, u_quad_t, struct buf *,
    daddr_t, daddr_t, long, int, long));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

int totreads;
int totreadblocks;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 treats a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif
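
/*
 * Editorial worked example (not from the original source): with
 * vp->v_lastr == 7, ISSEQREAD(vp, 8) and ISSEQREAD(vp, 7) are true
 * (a sequential advance or a re-read of the last block), while
 * ISSEQREAD(vp, 3) and ISSEQREAD(vp, 42) are false.  Under the
 * non-DIAGNOSTIC definition a first read of block 0 (v_lastr == 0)
 * also tests true, which is what lets read-ahead start on the common
 * sequential pass over a file; with DIAGNOSTIC and doclusterraz == 0,
 * block 0 is never treated as sequential.
 */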

/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 *	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	long flags;
	int error, num_ra, alreadyincore;

	origlblkno = lblkno;
	error = 0;
	/*
	 * get the requested block
	 */
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		int i;

		if (!ISSEQREAD(vp, origlblkno)) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= 1;
			return 0;
		} else if (vp->v_maxra >= origlblkno) {
			if ((vp->v_ralen + 1) < (MAXPHYS / size))
				vp->v_ralen++;
			if (vp->v_maxra >= (origlblkno + vp->v_ralen))
				return 0;
			lblkno = vp->v_maxra;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
	}
	/*
	 * if ralen is "none", then try a little
	 */
	if (vp->v_ralen == 0)
		vp->v_ralen = 1;
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (ISSEQREAD(vp, origlblkno)) {
		int i;

		/*
		 * this code makes sure that the stuff that we have read-ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back-off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= 1;
					alreadyincore = 1;
				} else {
					if (inmem(vp, rablkno)) {
						if (vp->v_maxra < rablkno)
							vp->v_maxra = rablkno + 1;
						continue;
					}
					if ((vp->v_ralen + 1) < MAXPHYS / size)
						vp->v_ralen++;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra)) &&
	    blkno != -1) {
		if ((vp->v_ralen + 1) < MAXPHYS / size)
			vp->v_ralen++;
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize,
			    NULL, rablkno, blkno, size, num_ra, B_READ | B_ASYNC);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}

	/*
	 * if the synchronous read is a cluster, handle it, otherwise do a
	 * simple, non-clustered read.
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}
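
/*
 * Caller sketch (editorial, modeled loosely on the 4.4BSD ufs read
 * path; "ip", "lbn", and "size" are illustrative names, not part of
 * this file).  A file system's read loop uses cluster_read() as a
 * drop-in replacement for bread():
 *
 *	if (doclusterread)
 *		error = cluster_read(vp, ip->i_size, lbn, size,
 *		    NOCRED, &bp);
 *	else
 *		error = bread(vp, lbn, size, NOCRED, &bp);
 *
 * On return, *bpp holds the requested block; any read-ahead issued on
 * its behalf was started asynchronously and is not waited for here.
 */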

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
struct buf *
cluster_rbuild(vp, filesize, bp, lbn, blkno, size, run, flags)
	struct vnode *vp;
	u_quad_t filesize;
	struct buf *bp;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
	long flags;
{
	struct cluster_save *b_save;
	struct buf *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %ld != iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;
	if (run == 0) {
		if (!bp) {
			bp = getblk(vp, lbn, size, 0, 0);
			bp->b_blkno = blkno;
			bp->b_flags |= flags;
		}
		return (bp);
	}
	tbp = bp;
	if (!tbp) {
		tbp = getblk(vp, lbn, size, 0, 0);
	}
	if (tbp->b_flags & B_CACHE) {
		return (tbp);
	} else if (bp == NULL) {
		tbp->b_flags |= B_ASYNC;
	}
	bp = getpbuf();
	bp->b_flags = flags | B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	b_save = malloc(sizeof(struct buf *) * (run + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	inc = btodb(size);
	for (bn = blkno, i = 0; i <= run; ++i, bn += inc) {
		if (i != 0) {
			tbp = getblk(vp, lbn + i, size, 0, 0);
			if ((tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO)) {
				brelse(tbp);
				break;
			}
			tbp->b_blkno = bn;
			tbp->b_flags |= flags | B_READ | B_ASYNC;
		} else {
			tbp->b_flags |= flags | B_READ;
		}
		++b_save->bs_nchildren;
		b_save->bs_children[i] = tbp;
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *) bp->b_pages, bp->b_npages);
	return (bp);
}
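
/*
 * Editorial note on the allocation above: the single malloc() carves
 * one M_SEGMENT allocation into a header followed by the child
 * pointer array,
 *
 *	[ struct cluster_save | struct buf *bs_children[run + 1] ]
 *
 * with bs_children pointing just past the header (b_save + 1), so the
 * single free() in cluster_callback() releases both at once.
 * cluster_wbuild() builds its b_save the same way.
 */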

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	caddr_t cp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *) (bp->b_saveaddr);
	pmap_qremove((vm_offset_t) bp->b_data, bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	free(b_save, M_SEGMENT);
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1.	Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;

	vp = bp->b_vp;
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			cluster_wbuild(vp, NULL, lblocksize,
			    vp->v_cstart, cursize, lbn);
		}
		/*
		 * Consider beginning a cluster.  If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}
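
/*
 * Editorial worked example (assuming an 8K f_iosize and a 64K MAXPHYS,
 * so maxclen == 64K/8K - 1 == 7, and assuming VOP_BMAP reports at
 * least that much disk contiguity): a process writing blocks 0..7 in
 * order hits case 2 at lbn 0 (v_cstart = 0, v_clen = 7, bdwrite),
 * case 3 at lbn 1..6 (each delayed with bdwrite), and case 4 at
 * lbn 7 (lbn == v_cstart + v_clen), where cluster_wbuild() pushes
 * all eight blocks as a single 64K transfer.
 */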


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp, *pb;
	caddr_t cp;
	int i, j, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %ld != iosize %ld\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
	if ((lbn != -1) || (last_bp == 0)) {
		while ((!(tbp = incore(vp, start_lbn)) || (tbp->b_flags & B_BUSY)
			|| (start_lbn == lbn)) && len) {
			++start_lbn;
			--len;
		}

		pb = trypbuf();
		/* Get more memory for current buffer */
		if (len <= 1 || pb == NULL) {
			if (pb != NULL)
				relpbuf(pb);
			if (last_bp) {
				bawrite(last_bp);
			} else if (len) {
				bp = getblk(vp, start_lbn, size, 0, 0);
				bawrite(bp);
			}
			return;
		}
		tbp = getblk(vp, start_lbn, size, 0, 0);
	} else {
		tbp = last_bp;
		if (tbp->b_flags & B_BUSY) {
			printf("vfs_cluster: warning: buffer already busy\n");
		}
		tbp->b_flags |= B_BUSY;
		last_bp = 0;
		pb = trypbuf();
		if (pb == NULL) {
			bawrite(tbp);
			return;
		}
	}

	if (!(tbp->b_flags & B_DELWRI)) {
		relpbuf(pb);
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer.  XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (tbp->b_bcount != tbp->b_bufsize) {
		relpbuf(pb);
		++start_lbn;
		--len;
		bawrite(tbp);
		goto redo;
	}
	bp = pb;
	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			/*
			 * Block is not in core or the non-sequential block
			 * ending our cluster was part of the cluster (in
			 * which case we don't want to write it twice).
			 */
			if (!(tbp = incore(vp, start_lbn)) ||
			    (last_bp == NULL && start_lbn == lbn))
				break;

			if ((tbp->b_flags & (B_INVAL | B_CLUSTEROK)) != B_CLUSTEROK)
				break;

			if ((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))
				break;

			/*
			 * Get the desired block buffer (unless it is the
			 * final sequential block whose buffer was passed in
			 * explicitly as last_bp).
			 */
			if (last_bp == NULL || start_lbn != lbn) {
				if (tbp->b_flags & B_BUSY)
					break;
				tbp = getblk(vp, start_lbn, size, 0, 0);
				if (!(tbp->b_flags & B_DELWRI) ||
				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
					brelse(tbp);
					break;
				}
			} else
				tbp = last_bp;
		}
		for (j = 0; j < tbp->b_npages; j += 1) {
			bp->b_pages[j + bp->b_npages] = tbp->b_pages[j];
		}
		bp->b_npages += tbp->b_npages;
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;
	}
	b_save->bs_nchildren = i;
	pmap_qenter((vm_offset_t) bp->b_data, (vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	if (i < len) {
		len -= i;
		goto redo;
	}
}
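
/*
 * Editorial note: when the child-collection loop above stops early
 * (a buffer that is missing, busy, not B_DELWRI, or that would push
 * the cluster past MAXPHYS), only the first i blocks go out in this
 * cluster.  start_lbn has already advanced past them, so the
 * "len -= i; goto redo" tail retries the remaining blocks as a new
 * cluster starting at the buffer that broke the run.
 */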

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
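
/*
 * Editorial note: the list built above holds the len delayed-write
 * buffers of the current cluster (v_cstart .. v_lastw, re-obtained
 * via bread()) with last_bp appended, so bs_nchildren is always
 * len + 1.  In 4.4BSD this list fed block reallocation from
 * cluster_write(); that call is gone in this revision, leaving only
 * the doreallocblks stub near the top of the file.
 */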