vfs_cluster.c (12662) -> vfs_cluster.c (12767)
1/*-
2 * Copyright (c) 1993
3 * The Regents of the University of California. All rights reserved.
4 * Modifications/enhancements:
5 * Copyright (c) 1995 John S. Dyson. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions

--- 19 unchanged lines hidden ---

28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)vfs_cluster.c 8.7 (Berkeley) 2/13/94
36 * $Id: vfs_cluster.c,v 1.28 1995/11/20 04:53:45 dyson Exp $
36 * $Id: vfs_cluster.c,v 1.29 1995/12/07 12:47:03 davidg Exp $
37 */
38
39#include <sys/param.h>
40#include <sys/systm.h>
41#include <sys/proc.h>
42#include <sys/buf.h>
43#include <sys/vnode.h>
44#include <sys/mount.h>

--- 84 unchanged lines hidden ---

129 int seq;
130
131 error = 0;
132 /*
133 * get the requested block
134 */
135 origlblkno = lblkno;
136 *bpp = bp = getblk(vp, lblkno, size, 0, 0);
137
137 seq = ISSEQREAD(vp, lblkno);
138 /*
139 * if it is in the cache, then check to see if the reads have been
140 * sequential. If they have, then try some read-ahead, otherwise
141 * back-off on prospective read-aheads.
142 */
143 if (bp->b_flags & B_CACHE) {
144 if (!seq) {
145 vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
146 vp->v_ralen >>= RA_SHIFTDOWN;
147 return 0;
148 } else if( vp->v_maxra > lblkno) {
149 if ( (vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >= (lblkno + vp->v_ralen)) {
150 if ( (vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >=
151 (lblkno + vp->v_ralen)) {
150 if ((vp->v_ralen + 1) < RA_MULTIPLE_FAST*(MAXPHYS / size))
151 ++vp->v_ralen;
152 return 0;
153 }
154 lblkno = vp->v_maxra;
155 } else {
156 lblkno += 1;
157 }
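
The grow/back-off arithmetic above is easier to see in isolation: each sequential hit widens vp->v_ralen by one block up to a cap of RA_MULTIPLE_FAST * (MAXPHYS / size), and a non-sequential read shifts it down by RA_SHIFTDOWN. A minimal userland sketch of that policy follows; the RA_* values and MAXPHYS are placeholders, since the real definitions sit in the hidden portions of this file and its headers.

#include <stdio.h>

#define MAXPHYS          (64 * 1024)    /* assumed value, typical of the era */
#define RA_SHIFTDOWN     1              /* placeholder, not from this hunk */
#define RA_MULTIPLE_FAST 8              /* placeholder, not from this hunk */

int
main(void)
{
	long size = 8192;               /* filesystem block size */
	int ralen = 1, i;

	/* sequential hits grow the read-ahead window toward the cap */
	for (i = 0; i < 10; i++) {
		if ((ralen + 1) < RA_MULTIPLE_FAST * (MAXPHYS / size))
			++ralen;
		printf("hit %d: ralen = %d\n", i, ralen);
	}
	/* one non-sequential read shifts it back down (halves it here) */
	ralen >>= RA_SHIFTDOWN;
	printf("after a miss: ralen = %d\n", ralen);
	return 0;
}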

--- 27 unchanged lines hidden ---

185 /*
186 * this code makes sure that the stuff that we have read-ahead
187 * is still in the cache. If it isn't, we have been reading
188 * ahead too much, and we need to back-off, otherwise we might
189 * try to read more.
190 */
191 for (i = 0; i < vp->v_ralen; i++) {
192 rablkno = lblkno + i;
193 alreadyincore = (int) incore(vp, rablkno);
195 alreadyincore = (int) gbincore(vp, rablkno);
194 if (!alreadyincore) {
195 if (inmem(vp, rablkno)) {
196 if (vp->v_maxra < rablkno)
197 vp->v_maxra = rablkno + 1;
198 continue;
199 }
200 if (rablkno < vp->v_maxra) {
201 vp->v_maxra = rablkno;
202 vp->v_ralen >>= RA_SHIFTDOWN;
203 alreadyincore = 1;
204 }
205 break;
206 } else if (vp->v_maxra < rablkno) {
207 vp->v_maxra = rablkno + 1;
208 }
209 }
210 }
211 /*
212 * we now build the read-ahead buffer if it is desirable.
213 */
214 rbp = NULL;
215 if (!alreadyincore &&
216 (rablkno + 1) * size <= filesize &&
213 ((u_quad_t)(rablkno + 1) * size) <= filesize &&
217 !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra, NULL)) &&
218 blkno != -1) {
219 if (num_ra > vp->v_ralen)
220 num_ra = vp->v_ralen;
221
222 if (num_ra) {
223 rbp = cluster_rbuild(vp, filesize, rablkno, blkno, size,
224 num_ra + 1);

--- 59 unchanged lines hidden ---
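
/*
 * Note on the read-ahead guard above (a reading of the call, not text
 * from this file): VOP_BMAP() translates the logical block rablkno into
 * a device block returned in blkno, reports in num_ra how many of the
 * following blocks sit physically contiguous on disk, and signals a
 * hole with blkno == -1.  num_ra therefore caps the read-ahead, and
 * cluster_rbuild() is asked for at most num_ra + 1 blocks in a single
 * contiguous transfer.
 */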

284 daddr_t bn;
285 int i, inc, j;
286
287#ifdef DIAGNOSTIC
288 if (size != vp->v_mount->mnt_stat.f_iosize)
289 panic("cluster_rbuild: size %d != filesize %d\n",
290 size, vp->v_mount->mnt_stat.f_iosize);
291#endif
292 if (size * (lbn + run) > filesize)
293 --run;
289 /*
290 * avoid a division
291 */
292 while ((u_quad_t) size * (lbn + run) > filesize) {
293 --run;
294 }
294
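
Both forms bound the run by the file size, but the old expression multiplied 32-bit quantities, which wraps once size * (lbn + run) crosses 2^31, i.e. for file offsets past 2 GB; widening one operand to u_quad_t first keeps the comparison exact, and the while loop walks run down instead of recomputing it with a 64-bit division, per the "avoid a division" comment. A self-contained illustration, with <stdint.h> types standing in for the kernel's:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	int32_t size = 8192;            /* f_iosize */
	int32_t n = 300000;             /* lbn + run, ~2.3 GB into the file */

	/* the old 32-bit multiply wraps past 2^31... */
	int32_t bad = (int32_t)((uint32_t)size * (uint32_t)n);
	/* ...widening first, as (u_quad_t)size * (lbn + run) does, is exact */
	int64_t good = (int64_t)size * n;

	printf("32-bit product: %ld\n", (long)bad);         /* negative garbage */
	printf("64-bit product: %lld\n", (long long)good);  /* 2457600000 */
	return 0;
}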
295 tbp = getblk(vp, lbn, size, 0, 0);
296 if (tbp->b_flags & B_CACHE)
297 return tbp;
298
299 tbp->b_blkno = blkno;
300 tbp->b_flags |= B_ASYNC | B_READ;
301 if( ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )

--- 14 unchanged lines hidden ---

316
317 bp->b_bcount = 0;
318 bp->b_bufsize = 0;
319 bp->b_npages = 0;
320
321 inc = btodb(size);
322 for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
323 if (i != 0) {
324 if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
325 if ((bp->b_npages * PAGE_SIZE) +
326 round_page(size) > MAXPHYS)
325 break;
326
327 if (incore(vp, lbn + i))
329 if (gbincore(vp, lbn + i))
328 break;
331
329 tbp = getblk(vp, lbn + i, size, 0, 0);
330
331 if ((tbp->b_flags & B_CACHE) ||
332 (tbp->b_flags & B_VMIO) == 0) {
333 brelse(tbp);
334 break;
335 }
336

--- 8 unchanged lines hidden ---

345 * force buffer to be re-constituted later
346 */
347 tbp->b_flags |= B_RELBUF;
348 brelse(tbp);
349 break;
350 }
351
352 tbp->b_flags |= B_READ | B_ASYNC;
353 if( tbp->b_blkno == tbp->b_lblkno) {
356 if (tbp->b_blkno == tbp->b_lblkno) {
354 tbp->b_blkno = bn;
355 } else if (tbp->b_blkno != bn) {
356 brelse(tbp);
357 break;
358 }
359 }
360 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
361 tbp, b_cluster.cluster_entry);

--- 95 unchanged lines hidden ---

457 * If we are not writing at end of file, the process
458 * seeked to another point in the file since its last
459 * write, or we have reached our maximum cluster size,
460 * then push the previous cluster. Otherwise try
461 * reallocating to make it sequential.
462 */
463 cursize = vp->v_lastw - vp->v_cstart + 1;
464#if 1
465 if ((lbn + 1) * lblocksize != filesize ||
468 if (((u_quad_t)(lbn + 1) * lblocksize) != filesize ||
466 lbn != vp->v_lastw + 1 ||
467 vp->v_clen <= cursize) {
468 if (!async)
469 cluster_wbuild(vp, lblocksize,
470 vp->v_cstart, cursize);
471 }
472#else
473 if (!doreallocblks ||

--- 34 unchanged lines hidden (view full) ---

508 }
509#endif
510 }
511 /*
512 * Consider beginning a cluster. If at end of file, make
513 * cluster as large as possible, otherwise find size of
514 * existing cluster.
515 */
516 if ((lbn + 1) * lblocksize != filesize &&
519 if (((u_quad_t) (lbn + 1) * lblocksize) != filesize &&
517 (bp->b_blkno == bp->b_lblkno) &&
518 (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
519 bp->b_blkno == -1)) {
520 bawrite(bp);
521 vp->v_clen = 0;
522 vp->v_lasta = bp->b_blkno;
523 vp->v_cstart = lbn + 1;
524 vp->v_lastw = lbn;
525 return;
526 }
527 vp->v_clen = maxclen;
528 if (!async && maxclen == 0) { /* I/O not contiguous */
529 vp->v_cstart = lbn + 1;
530 bawrite(bp);
533 if (!async)
534 bawrite(bp);
535 else
536 bdwrite(bp);
531 } else { /* Wait for rest of cluster */
532 vp->v_cstart = lbn;
533 bdwrite(bp);
534 }
535 } else if (lbn == vp->v_cstart + vp->v_clen) {
536 /*
537 * At end of cluster, write it out.
538 */

--- 13 unchanged lines hidden ---

552
553
554/*
555 * This is an awful lot like cluster_rbuild...wish they could be combined.
556 * The last lbn argument is the current block on which I/O is being
557 * performed. Check to see that it doesn't fall in the middle of
558 * the current block (if last_bp == NULL).
559 */
560void
566int
561cluster_wbuild(vp, size, start_lbn, len)
562 struct vnode *vp;
563 long size;
564 daddr_t start_lbn;
565 int len;
566{
567 struct buf *bp, *tbp;
568 int i, j, s;
575 int totalwritten = 0;
569 int dbsize = btodb(size);
570 int origlen = len;
577 while (len > 0) {
578 s = splbio();
579 if ( ((tbp = gbincore(vp, start_lbn)) == NULL) ||
580 ((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
581 ++start_lbn;
582 --len;
583 splx(s);
584 continue;
585 }
586 bremfree(tbp);
587 tbp->b_flags |= B_BUSY;
588 tbp->b_flags &= ~B_DONE;
589 splx(s);
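
The prologue above replaces the function's old redo: label with a while (len > 0) loop: each candidate buffer is looked up with gbincore() and claimed under splbio(), buffers that cannot join a cluster are pushed out singly, and every byte issued is tallied in totalwritten, which the function (now returning int rather than void) hands back.

/*
 * Hypothetical caller sketch ("nwritten" is illustrative, not from this
 * file): with this revision the function reports how many bytes of
 * dirty buffers it pushed, so a call site like the one seen earlier
 * could become:
 *
 *	int nwritten;
 *
 *	nwritten = cluster_wbuild(vp, lblocksize, vp->v_cstart, cursize);
 */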
571
572redo:
573 if (len == 0)
574 return;
575 if ( ((tbp = incore(vp, start_lbn)) == NULL) ||
576 ((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
577 ++start_lbn;
578 --len;
579 goto redo;
580 }
581
582 tbp = getblk(vp, start_lbn, size, 0, 0);
583 if ((tbp->b_flags & B_DELWRI) == 0) {
584 ++start_lbn;
585 --len;
586 brelse(tbp);
587 goto redo;
588 }
589 /*
590 * Extra memory in the buffer, punt on this buffer. XXX we could
591 * handle this in most cases, but we would have to push the extra
592 * memory down to after our max possible cluster size and then
593 * potentially pull it back up if the cluster was terminated
594 * prematurely--too much hassle.
595 */
596 if (((tbp->b_flags & (B_VMIO|B_CLUSTEROK)) != (B_VMIO|B_CLUSTEROK)) ||
597 (tbp->b_bcount != tbp->b_bufsize) ||
598 len == 1) {
599 bawrite(tbp);
600 ++start_lbn;
601 --len;
602 goto redo;
603 }
598 if (((tbp->b_flags & B_CLUSTEROK) != B_CLUSTEROK) ||
599 (tbp->b_bcount != tbp->b_bufsize) ||
600 (tbp->b_bcount != size) ||
601 len == 1) {
602 totalwritten += tbp->b_bufsize;
603 bawrite(tbp);
604 ++start_lbn;
605 --len;
606 continue;
607 }
604
605 bp = trypbuf();
606 if (bp == NULL) {
607 bawrite(tbp);
608 ++start_lbn;
609 --len;
610 goto redo;
611 }
609 bp = trypbuf();
610 if (bp == NULL) {
611 totalwritten += tbp->b_bufsize;
612 bawrite(tbp);
613 ++start_lbn;
614 --len;
615 continue;
616 }
612
613 TAILQ_INIT(&bp->b_cluster.cluster_head);
614 bp->b_bcount = 0;
615 bp->b_bufsize = 0;
616 bp->b_npages = 0;
617
618 bp->b_blkno = tbp->b_blkno;
619 bp->b_lblkno = tbp->b_lblkno;
620 (vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
621 bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
622 bp->b_iodone = cluster_callback;
623 pbgetvp(vp, bp);
623 bp->b_blkno = tbp->b_blkno;
624 bp->b_lblkno = tbp->b_lblkno;
625 (vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
626 bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER | (tbp->b_flags & B_VMIO);
627 bp->b_iodone = cluster_callback;
628 pbgetvp(vp, bp);
624
625 for (i = 0; i < len; ++i, ++start_lbn) {
626 if (i != 0) {
627 s = splbio();
628 if ((tbp = incore(vp, start_lbn)) == NULL) {
629 splx(s);
630 break;
631 }
630 for (i = 0; i < len; ++i, ++start_lbn) {
631 if (i != 0) {
632 s = splbio();
633 if ((tbp = gbincore(vp, start_lbn)) == NULL) {
634 splx(s);
635 break;
636 }
632
633 if ((tbp->b_flags & (B_CLUSTEROK|B_INVAL|B_BUSY|B_DELWRI)) != (B_DELWRI|B_CLUSTEROK)) {
634 splx(s);
635 break;
636 }
638 if ((tbp->b_flags & (B_VMIO|B_CLUSTEROK|B_INVAL|B_BUSY|B_DELWRI)) != (B_DELWRI|B_CLUSTEROK|(bp->b_flags & B_VMIO))) {
639 splx(s);
640 break;
641 }
637
638 if ((tbp->b_bcount != size) ||
639 ((bp->b_blkno + dbsize * i) != tbp->b_blkno) ||
640 ((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))) {
641 splx(s);
642 break;
643 }
644 bremfree(tbp);
645 tbp->b_flags |= B_BUSY;
646 tbp->b_flags &= ~B_DONE;
647 splx(s);
648 }
649 for (j = 0; j < tbp->b_npages; j += 1) {
650 vm_page_t m;
651 m = tbp->b_pages[j];
652 ++m->busy;
653 ++m->object->paging_in_progress;
654 if ((bp->b_npages == 0) ||
655 (bp->b_pages[bp->b_npages - 1] != m)) {
656 bp->b_pages[bp->b_npages] = m;
657 bp->b_npages++;
658 }
659 }
660 bp->b_bcount += size;
661 bp->b_bufsize += size;
662
663 tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
664 tbp->b_flags |= B_ASYNC;
665 s = splbio();
666 reassignbuf(tbp, tbp->b_vp); /* put on clean list */
667 ++tbp->b_vp->v_numoutput;
668 splx(s);
669 TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
670 tbp, b_cluster.cluster_entry);
671 }
672 pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
673 (vm_page_t *) bp->b_pages, bp->b_npages);
679 totalwritten += bp->b_bufsize;
674 bawrite(bp);
675
676 len -= i;
677 goto redo;
683 }
684 return totalwritten;
678}
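
One detail worth calling out in the page-gathering loop of cluster_wbuild() above: when the block size is smaller than a VM page, adjacent file blocks share pages, so the last page collected from one buffer can reappear as the first page of the next; the bp->b_pages[bp->b_npages - 1] != m test appears to be there to skip that duplicate before pmap_qenter() maps the collected pages contiguously. Illustrative arithmetic only:

#include <stdio.h>

int
main(void)
{
	int page_size = 4096, bsize = 2048;   /* e.g. 2K frags on 4K pages */
	int lbn;

	for (lbn = 0; lbn < 4; lbn++)
		printf("block %d starts in page %d\n",
		    lbn, lbn * bsize / page_size);
	return 0;       /* blocks 0,1 -> page 0; blocks 2,3 -> page 1 */
}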
679
680#if 0
681/*
682 * Collect together all the buffers in a cluster.
683 * Plus add one additional buffer.
684 */
685struct cluster_save *

--- 21 unchanged lines hidden ---