vfs_cluster.c revision 10551
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.19 1995/09/03 20:32:52 dyson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int doreallocblks = 0;
struct ctldebug debug13 = {"doreallocblks", &doreallocblks};

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
static struct buf *cluster_rbuild __P((struct vnode *, u_quad_t,
    daddr_t, daddr_t, long, int));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

int totreads;
int totreadblocks;
extern vm_page_t bogus_page;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 to treat a read of block zero as a non-sequential read.
 *
 * Setting it to 1 assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting it to 0 assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
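/*
 * A read is considered sequential if it is for the block immediately
 * following the last read on this vnode (v_lastr), or a re-read of that
 * same block.
 */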
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif

/*
 * allow for three entire read-aheads...  The system will
 * adjust downwards rapidly if needed...
 */
#define RA_MULTIPLE_FAST	2
#define RA_MULTIPLE_SLOW	3
#define RA_SHIFTDOWN	1	/* approx lg2(RA_MULTIPLE) */
/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 * 	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	long flags;
	int error, num_ra, alreadyincore;
	int i;
	int seq;

	error = 0;
	/*
	 * get the requested block
	 */
	origlblkno = lblkno;
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	seq = ISSEQREAD(vp, lblkno);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seq) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= RA_SHIFTDOWN;
			return 0;
		} else if (vp->v_maxra > lblkno) {
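			/*
			 * Read-ahead has already progressed past this block.
			 * If most of the read-ahead window is still ahead of
			 * us, just grow the window a little and return;
			 * otherwise continue reading ahead from v_maxra.
			 */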
			if ((vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >= (lblkno + vp->v_ralen)) {
				if ((vp->v_ralen + 1) < RA_MULTIPLE_FAST * (MAXPHYS / size))
					++vp->v_ralen;
				return 0;
			}
			lblkno = vp->v_maxra;
		} else {
			lblkno += 1;
		}
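		/*
		 * The requested block was valid in the cache; clear bp so
		 * that no synchronous read is issued below, leaving only the
		 * read-ahead I/O (if any) to be started.
		 */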
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
		vp->v_ralen = 0;
	}
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (seq) {

		/*
		 * bump ralen a bit...
		 */
		if ((vp->v_ralen + 1) < RA_MULTIPLE_SLOW * (MAXPHYS / size))
			++vp->v_ralen;
		/*
		 * this code makes sure that the blocks that we have read
		 * ahead are still in the cache.  If they aren't, we have
		 * been reading ahead too much and need to back off;
		 * otherwise we might try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (inmem(vp, rablkno)) {
					struct buf *bpt;
					if (vp->v_maxra < rablkno)
						vp->v_maxra = rablkno + 1;
					continue;
				}
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= RA_SHIFTDOWN;
					alreadyincore = 1;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra, NULL)) &&
	    blkno != -1) {
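		/*
		 * VOP_BMAP has reported num_ra contiguous blocks following
		 * rablkno.  Clamp the read-ahead to the current window and
		 * either build a clustered read or fall back to a single
		 * asynchronous block read.
		 */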
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize, rablkno, blkno, size,
				num_ra + 1);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			if ((rbp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run + 1) > filesize)
		--run;

	tbp = getblk(vp, lbn, size, 0, 0);
	if (tbp->b_flags & B_CACHE)
		return tbp;

	tbp->b_blkno = blkno;
	tbp->b_flags |= B_ASYNC | B_READ;
	if (((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf();
	if (bp == 0)
		return tbp;

	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

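	/*
	 * The cluster_save structure records the component buffers that make
	 * up this cluster so that cluster_callback() can complete each of
	 * them individually when the big transfer finishes.
	 */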
	b_save = malloc(sizeof(struct buf *) * run + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
				break;
			if (incore(vp, lbn + i))
				break;
			tbp = getblk(vp, lbn + i, size, 0, 0);

			if ((tbp->b_flags & B_CACHE) ||
				(tbp->b_flags & B_VMIO) == 0) {
				brelse(tbp);
				break;
			}

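			/*
			 * If any of the candidate buffer's pages already
			 * contain valid data, leave the block out of the
			 * cluster; the disk read would overwrite that data.
			 */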
			for (j = 0; j < tbp->b_npages; j++) {
				if (tbp->b_pages[j]->valid) {
					break;
				}
			}

			if (j != tbp->b_npages) {
				brelse(tbp);
				break;
			}

			tbp->b_flags |= B_READ | B_ASYNC;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		++b_save->bs_nchildren;
		b_save->bs_children[i] = tbp;
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
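			/*
			 * Pages that are already completely valid are mapped
			 * as bogus_page in the cluster buffer so that the
			 * device transfer cannot overwrite their contents.
			 */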
			if (m->valid == VM_PAGE_BITS_ALL) {
				m = bogus_page;
			}
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
		}
		bp->b_bcount += tbp->b_bcount;
		bp->b_bufsize += tbp->b_bufsize;
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	caddr_t cp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *) (bp->b_saveaddr);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark I/O as done on them.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	free(b_save, M_SEGMENT);
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;

	vp = bp->b_vp;
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

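	/*
	 * Per-vnode cluster state: v_cstart is the first block of the
	 * cluster being accumulated, v_clen the maximum number of additional
	 * blocks that may join it, v_lastw the last logical block written,
	 * and v_lasta the physical block of that write.  Together these
	 * decide whether this write extends the current cluster.
	 */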
	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at the end of the file, the
			 * process has seeked to another point in the file
			 * since its last write, or we have reached our
			 * maximum cluster size, then push the previous
			 * cluster.  Otherwise try reallocating to make it
			 * sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (!doreallocblks ||
			    (lbn + 1) * lblocksize != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				cluster_wbuild(vp, NULL, lblocksize,
				    vp->v_cstart, cursize, lbn);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					cluster_wbuild(vp, NULL, lblocksize,
					    vp->v_cstart, cursize, lbn);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster.  If at the end of the file,
		 * make the cluster as large as possible; otherwise find the
		 * size of the existing cluster.
		 */
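		/*
		 * If the block's physical address is unknown and VOP_BMAP
		 * cannot resolve it (or it maps to a hole), there is nothing
		 * to cluster: issue the write by itself and reset the
		 * cluster state.
		 */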
		if ((lbn + 1) * lblocksize != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp, *pb;
	caddr_t cp;
	int i, j, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
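	/*
	 * Each pass below assembles one cluster starting at start_lbn and
	 * issues it asynchronously (falling back to a plain bawrite() when
	 * clustering is not possible); any remaining blocks are handled by
	 * looping back to "redo".
	 */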
redo:
	if ((lbn != -1) || (last_bp == 0)) {
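		/*
		 * Skip over blocks that are not in core, are busy, or are
		 * the block on which I/O is currently being done (lbn);
		 * none of these can start a cluster.
		 */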
		while ((!(tbp = incore(vp, start_lbn)) || (tbp->b_flags & B_BUSY)
			|| (start_lbn == lbn)) && len) {
			++start_lbn;
			--len;
		}

		pb = trypbuf();
		/*
		 * If at most one block is left to write, or no pbuf is
		 * available, just issue the write directly.
		 */
		if (len <= 1 || pb == NULL) {
			if (pb != NULL)
				relpbuf(pb);
			if (last_bp) {
				bawrite(last_bp);
			} else if (len) {
				bp = getblk(vp, start_lbn, size, 0, 0);
				bawrite(bp);
			}
			return;
		}
		tbp = getblk(vp, start_lbn, size, 0, 0);
	} else {
		tbp = last_bp;
		if (tbp->b_flags & B_BUSY) {
			printf("vfs_cluster: warning: buffer already busy\n");
		}
		tbp->b_flags |= B_BUSY;
		last_bp = 0;
		pb = trypbuf();
		if (pb == NULL) {
			bawrite(tbp);
			return;
		}
	}

	if (!(tbp->b_flags & B_DELWRI)) {
		relpbuf(pb);
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (tbp->b_bcount != tbp->b_bufsize) {
		relpbuf(pb);
		++start_lbn;
		--len;
		bawrite(tbp);
		goto redo;
	}
	bp = pb;
	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			/*
			 * Block is not in core or the non-sequential block
			 * ending our cluster was part of the cluster (in
			 * which case we don't want to write it twice).
			 */
			if (!(tbp = incore(vp, start_lbn)) ||
			    (last_bp == NULL && start_lbn == lbn))
				break;

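			/*
			 * The candidate must be marked clusterable and not
			 * invalid, the combined cluster must stay within
			 * MAXPHYS, and its physical block must be contiguous
			 * with the cluster built so far.
			 */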
			if ((tbp->b_flags & (B_INVAL | B_CLUSTEROK)) != B_CLUSTEROK)
				break;

			if ((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))
				break;

			if ((tbp->b_blkno != tbp->b_lblkno) &&
				((bp->b_blkno + btodb(size) * i) != tbp->b_blkno))
				break;

			/*
			 * Get the desired block buffer (unless it is the
			 * final sequential block whose buffer was passed in
			 * explicitly as last_bp).
			 */
			if (last_bp == NULL || start_lbn != lbn) {
				if (tbp->b_flags & B_BUSY)
					break;
				tbp = getblk(vp, start_lbn, size, 0, 0);
				if (!(tbp->b_flags & B_DELWRI) ||
				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
					brelse(tbp);
					break;
				}
			} else
				tbp = last_bp;
		}
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
		}
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;
	}
	b_save->bs_nchildren = i;
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	if (i < len) {
		len -= i;
		goto redo;
	}
}

/*
 * Collect together all the buffers in a cluster,
 * plus add one additional buffer.
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
