vfs_cluster.c revision 12283
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.23 1995/10/29 15:31:22 phk Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#ifdef DEBUG
#include <vm/vm.h>
#include <sys/sysctl.h>
int doreallocblks = 0;
SYSCTL_INT(_debug, 13, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
static struct buf *cluster_rbuild __P((struct vnode *, u_quad_t,
    daddr_t, daddr_t, long, int));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

int totreads;
int totreadblocks;
extern vm_page_t bogus_page;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 treats a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
	int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif

/*
 * allow for three entire read-aheads...  The system will
 * adjust downwards rapidly if needed...
 */
#define RA_MULTIPLE_FAST	2
#define RA_MULTIPLE_SLOW	3
#define RA_SHIFTDOWN	1	/* approx lg2(RA_MULTIPLE) */
/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 * 	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	int error, num_ra, alreadyincore;
	int i;
	int seq;

	error = 0;
	/*
	 * get the requested block
	 */
	origlblkno = lblkno;
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	seq = ISSEQREAD(vp, lblkno);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seq) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= RA_SHIFTDOWN;
			return 0;
		} else if( vp->v_maxra > lblkno) {
			if ( (vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >= (lblkno + vp->v_ralen)) {
				if ((vp->v_ralen + 1) < RA_MULTIPLE_FAST*(MAXPHYS / size))
					++vp->v_ralen;
				return 0;
			}
			lblkno = vp->v_maxra;
		} else {
			lblkno += 1;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
		vp->v_ralen = 0;
	}
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (seq) {

	/*
	 * bump ralen a bit...
	 */
		if ((vp->v_ralen + 1) < RA_MULTIPLE_SLOW*(MAXPHYS / size))
			++vp->v_ralen;
		/*
		 * this code makes sure that the stuff that we have read-ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back-off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (inmem(vp, rablkno)) {
					if (vp->v_maxra < rablkno)
						vp->v_maxra = rablkno + 1;
					continue;
				}
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= RA_SHIFTDOWN;
					alreadyincore = 1;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra, NULL)) &&
	    blkno != -1) {
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize, rablkno, blkno, size,
				num_ra + 1);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			if ((rbp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run) > filesize)
		--run;

	tbp = getblk(vp, lbn, size, 0, 0);
	if (tbp->b_flags & B_CACHE)
		return tbp;

	tbp->b_blkno = blkno;
	tbp->b_flags |= B_ASYNC | B_READ;
	if( ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )
		return tbp;

	bp = trypbuf();
	if (bp == 0)
		return tbp;

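	/*
	 * Carry the page offset of the first buffer's data over to the
	 * pbuf, so that once the pages are entered with pmap_qenter()
	 * below, bp->b_data points at the same byte as tbp->b_data.
	 */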
	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	b_save = malloc(sizeof(struct buf *) * run +
		sizeof(struct cluster_save), M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

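	/*
	 * inc is one logical block expressed in DEV_BSIZE sectors; bn
	 * steps through the contiguous disk addresses of the candidate
	 * blocks.
	 */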
	inc = btodb(size);
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
				break;

			if (incore(vp, lbn + i))
				break;
			tbp = getblk(vp, lbn + i, size, 0, 0);

			if ((tbp->b_flags & B_CACHE) ||
				(tbp->b_flags & B_VMIO) == 0) {
				brelse(tbp);
				break;
			}

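			/*
			 * If any page of this buffer already holds valid
			 * data, the block is at least partially cached, so
			 * do not include it in the cluster read.
			 */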
			for (j=0;j<tbp->b_npages;j++) {
				if (tbp->b_pages[j]->valid) {
					break;
				}
			}

			if (j != tbp->b_npages) {
				/*
				 * force buffer to be re-constituted later
				 */
				tbp->b_flags |= B_RELBUF;
				brelse(tbp);
				break;
			}

			tbp->b_flags |= B_READ | B_ASYNC;
			if( tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		++b_save->bs_nchildren;
		b_save->bs_children[i] = tbp;
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
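			/*
			 * A page that is already completely valid is mapped
			 * as bogus_page so that the device transfer cannot
			 * overwrite its contents.
			 */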
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
				m = bogus_page;
			}
			if ((bp->b_npages == 0) ||
				(bp->b_bufsize & PAGE_MASK) == 0) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			} else {
				if ( tbp->b_npages > 1) {
					panic("cluster_rbuild: page unaligned filesystems not supported");
				}
			}
		}
		bp->b_bcount += tbp->b_bcount;
		bp->b_bufsize += tbp->b_bufsize;
	}
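	/*
	 * Map the collected pages into the cluster buffer's kernel address
	 * space so the driver sees one contiguous transfer.
	 */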
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct cluster_save *b_save;
	struct buf **bpp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	b_save = (struct cluster_save *) (bp->b_saveaddr);
	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (bpp = b_save->bs_children; b_save->bs_nchildren--; ++bpp) {
		tbp = *bpp;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	free(b_save, M_SEGMENT);
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;

	vp = bp->b_vp;
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster. Otherwise try
			 * reallocating to make it sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (!doreallocblks ||
			    (lbn + 1) * lblocksize != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				cluster_wbuild(vp, NULL, lblocksize,
				    vp->v_cstart, cursize, lbn);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					cluster_wbuild(vp, NULL, lblocksize,
					    vp->v_cstart, cursize, lbn);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		cluster_wbuild(vp, bp, bp->b_bcount, vp->v_cstart,
		    vp->v_clen + 1, lbn);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * The last lbn argument is the current block on which I/O is being
 * performed.  Check to see that it doesn't fall in the middle of
 * the current block (if last_bp == NULL).
 */
void
cluster_wbuild(vp, last_bp, size, start_lbn, len, lbn)
	struct vnode *vp;
	struct buf *last_bp;
	long size;
	daddr_t start_lbn;
	int len;
	daddr_t lbn;
{
	struct cluster_save *b_save;
	struct buf *bp, *tbp, *pb;
	int i, j, s;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_wbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
redo:
	if( (lbn != -1) || (last_bp == 0)) {
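		/*
		 * Skip over blocks that are not in core, are busy, or are
		 * the block currently being written (lbn); none of these
		 * can be part of the cluster.
		 */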
		while ((!(tbp = incore(vp, start_lbn)) || (tbp->b_flags & B_BUSY)
			|| (start_lbn == lbn)) && len) {
			++start_lbn;
			--len;
		}

		pb = trypbuf();
		/* Too few blocks left, or no pbuf: just do an ordinary write */
		if (len <= 1 || pb == NULL) {
			if (pb != NULL)
				relpbuf(pb);
			if (last_bp) {
				bawrite(last_bp);
			} else if (len) {
				bp = getblk(vp, start_lbn, size, 0, 0);
				bawrite(bp);
			}
			return;
		}
		tbp = getblk(vp, start_lbn, size, 0, 0);
	} else {
		tbp = last_bp;
		if( tbp->b_flags & B_BUSY) {
			printf("vfs_cluster: warning: buffer already busy\n");
		}
		tbp->b_flags |= B_BUSY;
		last_bp = 0;
		pb = trypbuf();
		if (pb == NULL) {
			bawrite(tbp);
			return;
		}
	}

	if (!(tbp->b_flags & B_DELWRI)) {
		relpbuf(pb);
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (((tbp->b_flags & B_VMIO) == 0) ||
		(tbp->b_bcount != tbp->b_bufsize)) {
		relpbuf(pb);
		++start_lbn;
		--len;
		bawrite(tbp);
		goto redo;
	}
	bp = pb;
	b_save = malloc(sizeof(struct buf *) * (len + 1) + sizeof(struct cluster_save),
	    M_SEGMENT, M_WAITOK);
	b_save->bs_nchildren = 0;
	b_save->bs_children = (struct buf **) (b_save + 1);
	bp->b_saveaddr = b_save;
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	if (tbp->b_flags & B_VMIO)
		bp->b_flags |= B_VMIO;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
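	/*
	 * As in cluster_rbuild, carry the first buffer's page offset over
	 * to the pbuf's data pointer so the pages mapped below line up
	 * with b_data.
	 */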
	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			/*
			 * Block is not in core or the non-sequential block
			 * ending our cluster was part of the cluster (in
			 * which case we don't want to write it twice).
			 */
			if (!(tbp = incore(vp, start_lbn)) ||
			    (last_bp == NULL && start_lbn == lbn))
				break;

			if ((tbp->b_flags & (B_INVAL | B_CLUSTEROK)) != B_CLUSTEROK)
				break;

			if ((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))
				break;

			if ( (tbp->b_blkno != tbp->b_lblkno) &&
				((bp->b_blkno + btodb(size) * i) != tbp->b_blkno))
				break;

			/*
			 * Get the desired block buffer (unless it is the
			 * final sequential block whose buffer was passed in
			 * explicitly as last_bp).
			 */
			if (last_bp == NULL || start_lbn != lbn) {
				if( tbp->b_flags & B_BUSY)
					break;
				tbp = getblk(vp, start_lbn, size, 0, 0);
				if (!(tbp->b_flags & B_DELWRI) ||
				    ((tbp->b_flags & B_VMIO) != (bp->b_flags & B_VMIO))) {
					brelse(tbp);
					break;
				}
			} else
				tbp = last_bp;
		}
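		/*
		 * Add this buffer's pages to the cluster buffer; a page
		 * that is shared with the previously added buffer is only
		 * entered once.
		 */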
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
		}
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		b_save->bs_children[i] = tbp;
	}
	b_save->bs_nchildren = i;
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	if (i < len) {
		len -= i;
		goto redo;
	}
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}