vfs_cluster.c revision 12407
/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 * $Id: vfs_cluster.c,v 1.25 1995/11/19 19:54:19 dyson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/vmmeter.h>
#include <miscfs/specfs/specdev.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#ifdef DEBUG
#include <sys/sysctl.h>
int doreallocblks = 0;
SYSCTL_INT(_debug, 13, doreallocblks, CTLFLAG_RW, &doreallocblks, 0, "");

#else
/* XXX for cluster_write */
#define doreallocblks 0
#endif

/*
 * Local declarations
 */
static struct buf *cluster_rbuild __P((struct vnode *, u_quad_t,
    daddr_t, daddr_t, long, int));
struct cluster_save *cluster_collectbufs __P((struct vnode *, struct buf *));

int totreads;
int totreadblocks;
extern vm_page_t bogus_page;

#ifdef DIAGNOSTIC
/*
 * Set to 1 if reads of block zero should cause readahead to be done.
 * Set to 0 to treat a read of block zero as a non-sequential read.
 *
 * Setting to one assumes that most reads of block zero of files are due to
 * sequential passes over the files (e.g. cat, sum) where additional blocks
 * will soon be needed.  Setting to zero assumes that the majority are
 * surgical strikes to get particular info (e.g. size, file) where readahead
 * blocks will not be used and, in fact, push out other potentially useful
 * blocks from the cache.  The former seems intuitive, but some quick tests
 * showed that the latter performed better from a system-wide point of view.
 */
	int doclusterraz = 0;

#define ISSEQREAD(vp, blk) \
	(((blk) != 0 || doclusterraz) && \
	 ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#else
#define ISSEQREAD(vp, blk) \
	(/* (blk) != 0 && */ ((blk) == (vp)->v_lastr + 1 || (blk) == (vp)->v_lastr))
#endif

/*
 * allow for three entire read-aheads...  The system will
 * adjust downwards rapidly if needed...
 */
#define RA_MULTIPLE_FAST	2
#define RA_MULTIPLE_SLOW	3
#define RA_SHIFTDOWN	1	/* approx lg2(RA_MULTIPLE) */
/*
 * This replaces bread.  If this is a bread at the beginning of a file and
 * lastr is 0, we assume this is the first read and we'll read up to two
 * blocks if they are sequential.  After that, we'll do regular read ahead
 * in clustered chunks.
 * 	bp is the block requested.
 *	rbp is the read-ahead block.
 *	If either is NULL, then you don't have to do the I/O.
 */
int
cluster_read(vp, filesize, lblkno, size, cred, bpp)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lblkno;
	long size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp, *rbp;
	daddr_t blkno, rablkno, origlblkno;
	int error, num_ra, alreadyincore;
	int i;
	int seq;

	error = 0;
	/*
	 * get the requested block
	 */
	origlblkno = lblkno;
	*bpp = bp = getblk(vp, lblkno, size, 0, 0);
	seq = ISSEQREAD(vp, lblkno);
	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
	if (bp->b_flags & B_CACHE) {
		if (!seq) {
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			vp->v_ralen >>= RA_SHIFTDOWN;
			return 0;
		} else if( vp->v_maxra > lblkno) {
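			/*
			 * The read-ahead window has already passed this block.
			 * If it still extends well beyond the current request,
			 * just grow the window a bit and issue no new I/O;
			 * otherwise continue the read-ahead where the previous
			 * one left off (v_maxra).
			 */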
			if ( (vp->v_maxra + (vp->v_ralen / RA_MULTIPLE_SLOW)) >= (lblkno + vp->v_ralen)) {
				if ((vp->v_ralen + 1) < RA_MULTIPLE_FAST*(MAXPHYS / size))
					++vp->v_ralen;
				return 0;
			}
			lblkno = vp->v_maxra;
		} else {
			lblkno += 1;
		}
		bp = NULL;
	} else {
		/*
		 * if it isn't in the cache, then get a chunk from disk if
		 * sequential, otherwise just get the block.
		 */
		bp->b_flags |= B_READ;
		lblkno += 1;
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
		vp->v_ralen = 0;
	}
	/*
	 * assume no read-ahead
	 */
	alreadyincore = 1;
	rablkno = lblkno;

	/*
	 * if we have been doing sequential I/O, then do some read-ahead
	 */
	if (seq) {

	/*
	 * bump ralen a bit...
	 */
		if ((vp->v_ralen + 1) < RA_MULTIPLE_SLOW*(MAXPHYS / size))
			++vp->v_ralen;
		/*
		 * this code makes sure that the stuff that we have read-ahead
		 * is still in the cache.  If it isn't, we have been reading
		 * ahead too much, and we need to back-off, otherwise we might
		 * try to read more.
		 */
		for (i = 0; i < vp->v_ralen; i++) {
			rablkno = lblkno + i;
			alreadyincore = (int) incore(vp, rablkno);
			if (!alreadyincore) {
				if (inmem(vp, rablkno)) {
					if (vp->v_maxra < rablkno)
						vp->v_maxra = rablkno + 1;
					continue;
				}
				if (rablkno < vp->v_maxra) {
					vp->v_maxra = rablkno;
					vp->v_ralen >>= RA_SHIFTDOWN;
					alreadyincore = 1;
				}
				break;
			} else if (vp->v_maxra < rablkno) {
				vp->v_maxra = rablkno + 1;
			}
		}
	}
	/*
	 * we now build the read-ahead buffer if it is desirable.
	 */
	rbp = NULL;
	if (!alreadyincore &&
	    (rablkno + 1) * size <= filesize &&
	    !(error = VOP_BMAP(vp, rablkno, NULL, &blkno, &num_ra, NULL)) &&
	    blkno != -1) {
		if (num_ra > vp->v_ralen)
			num_ra = vp->v_ralen;

		if (num_ra) {
			rbp = cluster_rbuild(vp, filesize, rablkno, blkno, size,
				num_ra + 1);
		} else {
			rbp = getblk(vp, rablkno, size, 0, 0);
			rbp->b_flags |= B_READ | B_ASYNC;
			rbp->b_blkno = blkno;
		}
	}

	/*
	 * handle the synchronous read
	 */
	if (bp) {
		if (bp->b_flags & (B_DONE | B_DELWRI))
			panic("cluster_read: DONE bp");
		else {
			vfs_busy_pages(bp, 0);
			error = VOP_STRATEGY(bp);
			vp->v_maxra = bp->b_lblkno + bp->b_bcount / size;
			totreads++;
			totreadblocks += bp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	/*
	 * and if we have read-aheads, do them too
	 */
	if (rbp) {
		vp->v_maxra = rbp->b_lblkno + rbp->b_bcount / size;
		if (error || (rbp->b_flags & B_CACHE)) {
			rbp->b_flags &= ~(B_ASYNC | B_READ);
			brelse(rbp);
		} else {
			if ((rbp->b_flags & B_CLUSTER) == 0)
				vfs_busy_pages(rbp, 0);
			(void) VOP_STRATEGY(rbp);
			totreads++;
			totreadblocks += rbp->b_bcount / size;
			curproc->p_stats->p_ru.ru_inblock++;
		}
	}
	if (bp && ((bp->b_flags & B_ASYNC) == 0))
		return (biowait(bp));
	return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(vp, filesize, lbn, blkno, size, run)
	struct vnode *vp;
	u_quad_t filesize;
	daddr_t lbn;
	daddr_t blkno;
	long size;
	int run;
{
	struct buf *bp, *tbp;
	daddr_t bn;
	int i, inc, j;

#ifdef DIAGNOSTIC
	if (size != vp->v_mount->mnt_stat.f_iosize)
		panic("cluster_rbuild: size %d != filesize %d\n",
		    size, vp->v_mount->mnt_stat.f_iosize);
#endif
	if (size * (lbn + run) > filesize)
		--run;

	tbp = getblk(vp, lbn, size, 0, 0);
	if (tbp->b_flags & B_CACHE)
		return tbp;

	tbp->b_blkno = blkno;
	tbp->b_flags |= B_ASYNC | B_READ;
	if( ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) )
		return tbp;

	bp = trypbuf();
	if (bp == 0)
		return tbp;

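	/*
	 * Carry the sub-page offset of the first component buffer over into
	 * the pbuf's data pointer so the page mappings established below line
	 * up with the buffer data.
	 */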
	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags = B_ASYNC | B_READ | B_CALL | B_BUSY | B_CLUSTER | B_VMIO;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	pbgetvp(vp, bp);

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
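	/*
	 * Walk the blocks of the proposed cluster.  The first buffer (i == 0)
	 * is the one obtained above; each later block is accepted only if it
	 * is not already cached, is VMIO backed, has no valid pages, and maps
	 * to the next contiguous disk block.  The pages of each accepted
	 * component buffer are chained into the pbuf.
	 */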
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) + size > MAXPHYS)
				break;

			if (incore(vp, lbn + i))
				break;
			tbp = getblk(vp, lbn + i, size, 0, 0);

			if ((tbp->b_flags & B_CACHE) ||
				(tbp->b_flags & B_VMIO) == 0) {
				brelse(tbp);
				break;
			}

			for (j=0;j<tbp->b_npages;j++) {
				if (tbp->b_pages[j]->valid) {
					break;
				}
			}

			if (j != tbp->b_npages) {
				/*
				 * force buffer to be re-constituted later
				 */
				tbp->b_flags |= B_RELBUF;
				brelse(tbp);
				break;
			}

			tbp->b_flags |= B_READ | B_ASYNC;
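			/*
			 * A buffer whose b_blkno still equals its b_lblkno has
			 * not been mapped to disk yet, so assign the physical
			 * block computed for this position; a buffer already
			 * mapped somewhere other than the next contiguous disk
			 * block ends the cluster.
			 */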
			if( tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			tbp, b_cluster.cluster_entry);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
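			/*
			 * A fully valid page must not be overwritten by the
			 * device, so point the pbuf at bogus_page instead; the
			 * real page is recovered when the I/O completes.
			 */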
			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) {
				m = bogus_page;
			}
			if ((bp->b_npages == 0) ||
				(bp->b_bufsize & PAGE_MASK) == 0) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			} else {
				if ( tbp->b_npages > 1) {
					panic("cluster_rbuild: page unaligned filesystems not supported");
				}
			}
		}
		bp->b_bcount += tbp->b_bcount;
		bp->b_bufsize += tbp->b_bufsize;
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *)bp->b_pages, bp->b_npages);
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
void
cluster_callback(bp)
	struct buf *bp;
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = bp->b_cluster.cluster_head.tqh_first;
		tbp; tbp = nbp) {
		nbp = tbp->b_cluster.cluster_entry.tqe_next;
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}
		biodone(tbp);
	}
	relpbuf(bp);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(bp, filesize)
	struct buf *bp;
	u_quad_t filesize;
{
	struct vnode *vp;
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	vp = bp->b_vp;
	async = (vp->v_mount && (vp->v_mount->mnt_flag & MNT_ASYNC));
	lblocksize = vp->v_mount->mnt_stat.f_iosize;
	lbn = bp->b_lblkno;

	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

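	/*
	 * If this write is not logically and physically sequential with the
	 * last one, or no cluster is in progress, push out any cluster we
	 * were building and consider starting a new one.  Otherwise the write
	 * joins or completes the cluster in progress below.
	 */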
	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = MAXPHYS / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at the end of the file, if the
			 * process has seeked to another point in the file
			 * since its last write, or if we have reached our
			 * maximum cluster size, then push the previous
			 * cluster.  Otherwise try reallocating to make it
			 * sequential.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
#if 1
			if ((lbn + 1) * lblocksize != filesize ||
				lbn != vp->v_lastw + 1 ||
				vp->v_clen <= cursize) {
				if (!async)
					cluster_wbuild(vp, lblocksize,
						vp->v_cstart, cursize);
			}
#else
			if (!doreallocblks ||
			    (lbn + 1) * lblocksize != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async)
					cluster_wbuild(vp, lblocksize,
						vp->v_cstart, cursize);
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					cluster_wbuild(vp, lblocksize,
					    vp->v_cstart, cursize);
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
#endif
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((lbn + 1) * lblocksize != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out.
		 */
		bdwrite(bp);
		cluster_wbuild(vp, lblocksize, vp->v_cstart,
		    vp->v_clen + 1);
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Scan up to len blocks starting at start_lbn and gang contiguous
 * delayed-write buffers together into clustered writes.
 */
void
cluster_wbuild(vp, size, start_lbn, len)
	struct vnode *vp;
	long size;
	daddr_t start_lbn;
	int len;
{
	struct buf *bp, *tbp;
	int i, j, s;
	int dbsize = btodb(size);
	int origlen = len;

redo:
	if (len == 0)
		return;
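	/*
	 * Skip over leading blocks that cannot head a cluster: the buffer
	 * must be in core and must be a delayed write that is neither invalid
	 * nor busy.
	 */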
	if ( ((tbp = incore(vp, start_lbn)) == NULL) ||
		((tbp->b_flags & (B_INVAL|B_BUSY|B_DELWRI)) != B_DELWRI)) {
		++start_lbn;
		--len;
		goto redo;
	}

	tbp = getblk(vp, start_lbn, size, 0, 0);
	if ((tbp->b_flags & B_DELWRI) == 0) {
		++start_lbn;
		--len;
		brelse(tbp);
		goto redo;
	}
	/*
	 * Extra memory in the buffer, punt on this buffer. XXX we could
	 * handle this in most cases, but we would have to push the extra
	 * memory down to after our max possible cluster size and then
	 * potentially pull it back up if the cluster was terminated
	 * prematurely--too much hassle.
	 */
	if (((tbp->b_flags & (B_VMIO|B_CLUSTEROK)) != (B_VMIO|B_CLUSTEROK)) ||
		(tbp->b_bcount != tbp->b_bufsize) ||
		len == 1) {
		bawrite(tbp);
		++start_lbn;
		--len;
		goto redo;
	}

	bp = trypbuf();
	if (bp == NULL) {
		bawrite(tbp);
		++start_lbn;
		--len;
		goto redo;
	}

	TAILQ_INIT(&bp->b_cluster.cluster_head);
	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	bp->b_blkno = tbp->b_blkno;
	bp->b_lblkno = tbp->b_lblkno;
	(vm_offset_t) bp->b_data |= ((vm_offset_t) tbp->b_data) & PAGE_MASK;
	bp->b_flags |= B_CALL | B_BUSY | B_CLUSTER;
	bp->b_iodone = cluster_callback;
	pbgetvp(vp, bp);

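	/*
	 * Gather up to len contiguous delayed-write buffers, stealing their
	 * pages into the pbuf.  The component buffers are queued on the
	 * cluster list and are individually completed in cluster_callback
	 * once the clustered write finishes.
	 */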
	for (i = 0; i < len; ++i, ++start_lbn) {
		if (i != 0) {
			s = splbio();
			if ((tbp = incore(vp, start_lbn)) == NULL) {
				splx(s);
				break;
			}

			if ((tbp->b_flags & (B_CLUSTEROK|B_INVAL|B_BUSY|B_DELWRI)) != (B_DELWRI|B_CLUSTEROK)) {
				splx(s);
				break;
			}

			if ((tbp->b_bcount != size) ||
				((bp->b_blkno + dbsize * i) != tbp->b_blkno) ||
				((tbp->b_npages + bp->b_npages) > (MAXPHYS / PAGE_SIZE))) {
				splx(s);
				break;
			}
			bremfree(tbp);
			tbp->b_flags |= B_BUSY;
			tbp->b_flags &= ~B_DONE;
			splx(s);
		}
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			++m->busy;
			++m->object->paging_in_progress;
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages - 1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
		}
		bp->b_bcount += size;
		bp->b_bufsize += size;

		tbp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		tbp->b_flags |= B_ASYNC;
		s = splbio();
		reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
		++tbp->b_vp->v_numoutput;
		splx(s);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			tbp, b_cluster.cluster_entry);
	}
	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		(vm_page_t *) bp->b_pages, bp->b_npages);
	bawrite(bp);

	len -= i;
	goto redo;
}

#if 0
/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
struct cluster_save *
cluster_collectbufs(vp, last_bp)
	struct vnode *vp;
	struct buf *last_bp;
{
	struct cluster_save *buflist;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++)
		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED,
		    &buflist->bs_children[i]);
	buflist->bs_children[i] = last_bp;
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
#endif