/*-
 * Copyright (c) 1993
 *	The Regents of the University of California.  All rights reserved.
 * Modifications/enhancements:
 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/vfs_cluster.c 254138 2013-08-09 11:11:11Z attilio $");

#include "opt_debug_cluster.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <sys/sysctl.h>

#if defined(CLUSTERDEBUG)
static int	rcluster = 0;
SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
    "Debug VFS clustering code");
#endif

static MALLOC_DEFINE(M_SEGMENT, "cl_savebuf", "cluster_save buffer");

static struct cluster_save *cluster_collectbufs(struct vnode *vp,
	    struct buf *last_bp, int gbflags);
static struct buf *cluster_rbuild(struct vnode *vp, u_quad_t filesize,
	    daddr_t lbn, daddr_t blkno, long size, int run, int gbflags,
	    struct buf *fbp);
static void cluster_callback(struct buf *);

static int write_behind = 1;
SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");

static int read_max = 64;
SYSCTL_INT(_vfs, OID_AUTO, read_max, CTLFLAG_RW, &read_max, 0,
    "Cluster read-ahead max block count");

static int read_min = 1;
SYSCTL_INT(_vfs, OID_AUTO, read_min, CTLFLAG_RW, &read_min, 0,
    "Cluster read min block count");

/* Page expended to mark partially backed buffers */
extern vm_page_t	bogus_page;

/*
 * Read data to a buf, including read-ahead if we find this to be beneficial.
 * cluster_read replaces bread.
 */
int
cluster_read(struct vnode *vp, u_quad_t filesize, daddr_t lblkno, long size,
    struct ucred *cred, long totread, int seqcount, int gbflags,
    struct buf **bpp)
{
	struct buf *bp, *rbp, *reqbp;
	struct bufobj *bo;
	daddr_t blkno, origblkno;
	int maxra, racluster;
	int error, ncontig;
	int i;

	error = 0;
	bo = &vp->v_bufobj;
	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	/*
	 * Try to limit the amount of read-ahead by a few
	 * ad-hoc parameters.  This needs work!!!
	 */
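	/*
	 * racluster is the most blocks that fit in one physical I/O;
	 * maxra starts from the caller's sequential-access estimate and
	 * is then clamped by the vfs.read_max tunable, by an eighth of
	 * the buffer headers, and finally by end of file.
	 */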
	racluster = vp->v_mount->mnt_iosize_max / size;
	maxra = seqcount;
	maxra = min(read_max, maxra);
	maxra = min(nbuf/8, maxra);
	if (((u_quad_t)(lblkno + maxra + 1) * size) > filesize)
		maxra = (filesize / size) - lblkno;

	/*
	 * get the requested block
	 */
	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0, gbflags);
	origblkno = lblkno;

	/*
	 * if it is in the cache, then check to see if the reads have been
	 * sequential.  If they have, then try some read-ahead, otherwise
	 * back-off on prospective read-aheads.
	 */
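	/*
	 * B_RAM is the read-ahead mark: it is set below on buffers near
	 * the end of each read-ahead window, so a later cache hit on a
	 * marked buffer signals that the reader has nearly consumed the
	 * window and more read-ahead is worth issuing.
	 */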
	if (bp->b_flags & B_CACHE) {
		if (!seqcount) {
			return 0;
		} else if ((bp->b_flags & B_RAM) == 0) {
			return 0;
		} else {
			bp->b_flags &= ~B_RAM;
			BO_RLOCK(bo);
			for (i = 1; i < maxra; i++) {
				/*
				 * Stop if the buffer does not exist or it
				 * is invalid (about to go away?)
				 */
				rbp = gbincore(&vp->v_bufobj, lblkno+i);
				if (rbp == NULL || (rbp->b_flags & B_INVAL))
					break;

				/*
				 * Set another read-ahead mark so we know
				 * to check again. (If we can lock the
				 * buffer without waiting)
				 */
				if ((((i % racluster) == (racluster - 1)) ||
				    (i == (maxra - 1)))
				    && (0 == BUF_LOCK(rbp,
					LK_EXCLUSIVE | LK_NOWAIT, NULL))) {
					rbp->b_flags |= B_RAM;
					BUF_UNLOCK(rbp);
				}
			}
			BO_RUNLOCK(bo);
			if (i >= maxra) {
				return 0;
			}
			lblkno += i;
		}
		reqbp = bp = NULL;
	/*
	 * If it isn't in the cache, then get a chunk from
	 * disk if sequential, otherwise just get the block.
	 */
	} else {
		off_t firstread = bp->b_offset;
		int nblks;
		long minread;

		KASSERT(bp->b_offset != NOOFFSET,
		    ("cluster_read: no buffer offset"));

		ncontig = 0;

		/*
		 * Enforce the vfs.read_min floor on the amount that we
		 * will read synchronously.
		 */
		minread = read_min * size;
		if (minread > totread)
			totread = minread;

		/*
		 * Compute the total number of blocks that we should read
		 * synchronously.
		 */
		if (firstread + totread > filesize)
			totread = filesize - firstread;
		nblks = howmany(totread, size);
		if (nblks > racluster)
			nblks = racluster;

		/*
		 * Now compute the number of contiguous blocks.
		 */
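		/*
		 * VOP_BMAP translates the logical block into its physical
		 * block number and reports how many blocks follow it
		 * contiguously on disk; blkno == -1 denotes a hole.
		 */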
		if (nblks > 1) {
			error = VOP_BMAP(vp, lblkno, NULL,
				&blkno, &ncontig, NULL);
			/*
			 * If this failed to map just do the original block.
			 */
			if (error || blkno == -1)
				ncontig = 0;
		}

		/*
		 * If we have contiguous data available do a cluster
		 * otherwise just read the requested block.
		 */
		if (ncontig) {
			/* Account for our first block. */
			ncontig = min(ncontig + 1, nblks);
			if (ncontig < nblks)
				nblks = ncontig;
			bp = cluster_rbuild(vp, filesize, lblkno,
			    blkno, size, nblks, gbflags, bp);
			lblkno += (bp->b_bufsize / size);
		} else {
			bp->b_flags |= B_RAM;
			bp->b_iocmd = BIO_READ;
			lblkno += 1;
		}
	}

	/*
	 * Handle the synchronous read so that it is available ASAP.
	 */
	if (bp) {
		if ((bp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(bp, 0);
		}
		bp->b_flags &= ~B_INVAL;
		bp->b_ioflags &= ~BIO_ERROR;
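		/*
		 * For async requests the buffer lock is released by the
		 * completion path in interrupt context, so hand ownership
		 * of the lock to the kernel before issuing the I/O.
		 */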
		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
			BUF_KERNPROC(bp);
		bp->b_iooffset = dbtob(bp->b_blkno);
		bstrategy(bp);
		curthread->td_ru.ru_inblock++;
	}

	/*
	 * If we have been doing sequential I/O, then do some read-ahead.
	 */
	while (lblkno < (origblkno + maxra)) {
		error = VOP_BMAP(vp, lblkno, NULL, &blkno, &ncontig, NULL);
		if (error)
			break;

		if (blkno == -1)
			break;

		/*
		 * We could throttle ncontig here by maxra but we might as
		 * well read the data if it is contiguous.  We're throttled
		 * by racluster anyway.
		 */
		if (ncontig) {
			ncontig = min(ncontig + 1, racluster);
			rbp = cluster_rbuild(vp, filesize, lblkno, blkno,
			    size, ncontig, gbflags, NULL);
			lblkno += (rbp->b_bufsize / size);
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
		} else {
			rbp = getblk(vp, lblkno, size, 0, 0, gbflags);
			lblkno += 1;
			if (rbp->b_flags & B_DELWRI) {
				bqrelse(rbp);
				continue;
			}
			rbp->b_flags |= B_ASYNC | B_RAM;
			rbp->b_iocmd = BIO_READ;
			rbp->b_blkno = blkno;
		}
		if (rbp->b_flags & B_CACHE) {
			rbp->b_flags &= ~B_ASYNC;
			bqrelse(rbp);
			continue;
		}
		if ((rbp->b_flags & B_CLUSTER) == 0) {
			vfs_busy_pages(rbp, 0);
		}
		rbp->b_flags &= ~B_INVAL;
		rbp->b_ioflags &= ~BIO_ERROR;
		if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
			BUF_KERNPROC(rbp);
		rbp->b_iooffset = dbtob(rbp->b_blkno);
		bstrategy(rbp);
		curthread->td_ru.ru_inblock++;
	}

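	/*
	 * If the requested buffer was sent to disk above, wait for it to
	 * complete; on a pure cache hit reqbp is NULL and only read-ahead
	 * was issued.
	 */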
	if (reqbp)
		return (bufwait(reqbp));
	else
		return (error);
}

/*
 * If blocks are contiguous on disk, use this to provide clustered
 * read ahead.  We will read as many blocks as possible sequentially
 * and then parcel them up into logical blocks in the buffer hash table.
 */
static struct buf *
cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
    daddr_t blkno, long size, int run, int gbflags, struct buf *fbp)
{
	struct bufobj *bo;
	struct buf *bp, *tbp;
	daddr_t bn;
	off_t off;
	long tinc, tsize;
	int i, inc, j, toff;

	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
	    ("cluster_rbuild: size %ld != f_iosize %jd\n",
	    size, (intmax_t)vp->v_mount->mnt_stat.f_iosize));

	/*
	 * avoid a division
	 */
	while ((u_quad_t) size * (lbn + run) > filesize) {
		--run;
	}

	if (fbp) {
		tbp = fbp;
		tbp->b_iocmd = BIO_READ;
	} else {
		tbp = getblk(vp, lbn, size, 0, 0, gbflags);
		if (tbp->b_flags & B_CACHE)
			return tbp;
		tbp->b_flags |= B_ASYNC | B_RAM;
		tbp->b_iocmd = BIO_READ;
	}
	tbp->b_blkno = blkno;
	if ((tbp->b_flags & B_MALLOC) ||
	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
		return tbp;

	bp = trypbuf(&cluster_pbuf_freecnt);
	if (bp == NULL)
		return tbp;

	/*
	 * We are synthesizing a buffer out of vm_page_t's, but
	 * if the block size is not page aligned then the starting
	 * address may not be either.  Inherit the b_data offset
	 * from the original buffer.
	 */
	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
	if ((gbflags & GB_UNMAPPED) != 0) {
		bp->b_flags |= B_UNMAPPED;
		bp->b_data = unmapped_buf;
	} else {
		bp->b_data = (char *)((vm_offset_t)bp->b_data |
		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
	}
	bp->b_iocmd = BIO_READ;
	bp->b_iodone = cluster_callback;
	bp->b_blkno = blkno;
	bp->b_lblkno = lbn;
	bp->b_offset = tbp->b_offset;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
	pbgetvp(vp, bp);
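
	/*
	 * The pages backing the cluster are collected from the component
	 * buffers below; cluster_callback() distributes the result back
	 * to them when the I/O completes.
	 */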

	TAILQ_INIT(&bp->b_cluster.cluster_head);

	bp->b_bcount = 0;
	bp->b_bufsize = 0;
	bp->b_npages = 0;

	inc = btodb(size);
	bo = &vp->v_bufobj;
	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
		if (i != 0) {
			if ((bp->b_npages * PAGE_SIZE) +
			    round_page(size) > vp->v_mount->mnt_iosize_max) {
				break;
			}

			tbp = getblk(vp, lbn + i, size, 0, 0, GB_LOCK_NOWAIT |
			    (gbflags & GB_UNMAPPED));

			/* Don't wait around for locked bufs. */
			if (tbp == NULL)
				break;

			/*
			 * Stop scanning if the buffer is fully valid
			 * (marked B_CACHE), or locked (may be doing a
			 * background write), or if the buffer is not
			 * VMIO backed.  The clustering code can only deal
			 * with VMIO-backed buffers.  The bo lock is not
			 * required for the BKGRDINPROG check since it
			 * can not be set without the buf lock.
			 */
			if ((tbp->b_vflags & BV_BKGRDINPROG) ||
			    (tbp->b_flags & B_CACHE) ||
			    (tbp->b_flags & B_VMIO) == 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * The buffer must be completely invalid in order to
			 * take part in the cluster.  If it is partially valid
			 * then we stop.
			 */
			off = tbp->b_offset;
			tsize = size;
			VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
			for (j = 0; tsize > 0; j++) {
				toff = off & PAGE_MASK;
				tinc = tsize;
				if (toff + tinc > PAGE_SIZE)
					tinc = PAGE_SIZE - toff;
				VM_OBJECT_ASSERT_WLOCKED(tbp->b_pages[j]->object);
				if ((tbp->b_pages[j]->valid &
				    vm_page_bits(toff, tinc)) != 0)
					break;
				off += tinc;
				tsize -= tinc;
			}
			VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
			if (tsize > 0) {
				bqrelse(tbp);
				break;
			}

			/*
			 * Set a read-ahead mark as appropriate
			 */
			if ((fbp && (i == 1)) || (i == (run - 1)))
				tbp->b_flags |= B_RAM;

			/*
			 * Set the buffer up for an async read (XXX should
			 * we do this only if we do not wind up brelse()ing?).
			 * Set the block number if it isn't set, otherwise
			 * if it is make sure it matches the block number we
			 * expect.
			 */
			tbp->b_flags |= B_ASYNC;
			tbp->b_iocmd = BIO_READ;
			if (tbp->b_blkno == tbp->b_lblkno) {
				tbp->b_blkno = bn;
			} else if (tbp->b_blkno != bn) {
				brelse(tbp);
				break;
			}
		}
		/*
		 * XXX fbp from caller may not be B_ASYNC, but we are going
		 * to biodone() it in cluster_callback() anyway
		 */
		BUF_KERNPROC(tbp);
		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
			tbp, b_cluster.cluster_entry);
		VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
		for (j = 0; j < tbp->b_npages; j += 1) {
			vm_page_t m;
			m = tbp->b_pages[j];
			vm_page_sbusy(m);
			vm_object_pip_add(m->object, 1);
			if ((bp->b_npages == 0) ||
				(bp->b_pages[bp->b_npages-1] != m)) {
				bp->b_pages[bp->b_npages] = m;
				bp->b_npages++;
			}
			if (m->valid == VM_PAGE_BITS_ALL)
				tbp->b_pages[j] = bogus_page;
		}
		VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
		/*
		 * Don't inherit tbp->b_bufsize as it may be larger due to
		 * a non-page-aligned size.  Instead just aggregate using
		 * 'size'.
		 */
		if (tbp->b_bcount != size)
			printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
		if (tbp->b_bufsize != size)
			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
		bp->b_bcount += size;
		bp->b_bufsize += size;
	}

	/*
	 * Fully valid pages in the cluster are already good and do not need
	 * to be re-read from disk.  Replace the page with bogus_page.
	 */
	VM_OBJECT_WLOCK(bp->b_bufobj->bo_object);
	for (j = 0; j < bp->b_npages; j++) {
		VM_OBJECT_ASSERT_WLOCKED(bp->b_pages[j]->object);
		if (bp->b_pages[j]->valid == VM_PAGE_BITS_ALL)
			bp->b_pages[j] = bogus_page;
	}
	VM_OBJECT_WUNLOCK(bp->b_bufobj->bo_object);
	if (bp->b_bufsize > bp->b_kvasize)
		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
		    bp->b_bufsize, bp->b_kvasize);
	bp->b_kvasize = bp->b_bufsize;

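	/*
	 * Map the assembled pages into the pbuf's KVA, unless an unmapped
	 * buffer was requested.
	 */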
	if ((bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
		    (vm_page_t *)bp->b_pages, bp->b_npages);
	}
	return (bp);
}

/*
 * Cleanup after a clustered read or write.
 * This is complicated by the fact that any of the buffers might have
 * extra memory (if there were no empty buffer headers at allocbuf time)
 * that we will need to shift around.
 */
static void
cluster_callback(struct buf *bp)
{
	struct buf *nbp, *tbp;
	int error = 0;

	/*
	 * Must propagate errors to all the components.
	 */
	if (bp->b_ioflags & BIO_ERROR)
		error = bp->b_error;

	if ((bp->b_flags & B_UNMAPPED) == 0) {
		pmap_qremove(trunc_page((vm_offset_t) bp->b_data),
		    bp->b_npages);
	}
	/*
	 * Move memory from the large cluster buffer into the component
	 * buffers and mark IO as done on these.
	 */
	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
		tbp; tbp = nbp) {
		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
		if (error) {
			tbp->b_ioflags |= BIO_ERROR;
			tbp->b_error = error;
		} else {
			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
			tbp->b_flags &= ~B_INVAL;
			tbp->b_ioflags &= ~BIO_ERROR;
			/*
			 * XXX the bdwrite()/bqrelse() issued during
			 * cluster building clears B_RELBUF (see bqrelse()
			 * comment).  If direct I/O was specified, we have
			 * to restore it here to allow the buffer and VM
			 * to be freed.
			 */
			if (tbp->b_flags & B_DIRECT)
				tbp->b_flags |= B_RELBUF;
		}
		bufdone(tbp);
	}
	pbrelvp(bp);
	relpbuf(bp, &cluster_pbuf_freecnt);
}

/*
 *	cluster_wbuild_wb:
 *
 *	Implement modified write build for cluster.
 *
 *		write_behind = 0	write behind disabled
 *		write_behind = 1	write behind normal (default)
 *		write_behind = 2	write behind backed-off
 */

static __inline int
cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	int r = 0;

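	/*
	 * In backed-off mode, shift the window back one cluster length
	 * before writing, so a write-behind is issued only once a full
	 * cluster has accumulated behind the current position.
	 */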
	switch (write_behind) {
	case 2:
		if (start_lbn < len)
			break;
		start_lbn -= len;
		/* FALLTHROUGH */
	case 1:
		r = cluster_wbuild(vp, size, start_lbn, len, gbflags);
		/* FALLTHROUGH */
	default:
		/* FALLTHROUGH */
		break;
	}
	return (r);
}

/*
 * Do clustered write for FFS.
 *
 * Four cases:
 *	1. Write is not sequential (write asynchronously)
 *	Write is sequential:
 *	2.	beginning of cluster - begin cluster
 *	3.	middle of a cluster - add to cluster
 *	4.	end of a cluster - asynchronously write cluster
 */
void
cluster_write(struct vnode *vp, struct buf *bp, u_quad_t filesize, int seqcount,
    int gbflags)
{
	daddr_t lbn;
	int maxclen, cursize;
	int lblocksize;
	int async;

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	if (vp->v_type == VREG) {
		async = DOINGASYNC(vp);
		lblocksize = vp->v_mount->mnt_stat.f_iosize;
	} else {
		async = 0;
		lblocksize = bp->b_bufsize;
	}
	lbn = bp->b_lblkno;
	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));

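	/*
	 * Per-vnode write-cluster state: v_cstart is the first block of
	 * the cluster being collected, v_lastw the last logical block
	 * written, v_lasta its physical block, and v_clen the farthest
	 * offset from v_cstart the cluster may reach before it is pushed.
	 */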
	/* Initialize vnode to beginning of file. */
	if (lbn == 0)
		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;

	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
		if (vp->v_clen != 0) {
			/*
			 * Next block is not sequential.
			 *
			 * If we are not writing at end of file, the process
			 * seeked to another point in the file since its last
			 * write, or we have reached our maximum cluster size,
			 * then push the previous cluster. Otherwise try
			 * reallocating to make it sequential.
			 *
			 * Change to algorithm: only push previous cluster if
			 * it was sequential from the point of view of the
			 * seqcount heuristic, otherwise leave the buffer
			 * intact so we can potentially optimize the I/O
			 * later on in the buf_daemon or update daemon
			 * flush.
			 */
			cursize = vp->v_lastw - vp->v_cstart + 1;
			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
				if (!async && seqcount > 0) {
					cluster_wbuild_wb(vp, lblocksize,
					    vp->v_cstart, cursize, gbflags);
				}
			} else {
				struct buf **bpp, **endbp;
				struct cluster_save *buflist;

				buflist = cluster_collectbufs(vp, bp, gbflags);
				endbp = &buflist->bs_children
				    [buflist->bs_nchildren - 1];
				if (VOP_REALLOCBLKS(vp, buflist)) {
					/*
					 * Failed, push the previous cluster
					 * if *really* writing sequentially
					 * in the logical file (seqcount > 1),
					 * otherwise delay it in the hopes that
					 * the low level disk driver can
					 * optimize the write ordering.
					 */
					for (bpp = buflist->bs_children;
					     bpp < endbp; bpp++)
						brelse(*bpp);
					free(buflist, M_SEGMENT);
					if (seqcount > 1) {
						cluster_wbuild_wb(vp,
						    lblocksize, vp->v_cstart,
						    cursize, gbflags);
					}
				} else {
					/*
					 * Succeeded, keep building cluster.
					 */
					for (bpp = buflist->bs_children;
					     bpp <= endbp; bpp++)
						bdwrite(*bpp);
					free(buflist, M_SEGMENT);
					vp->v_lastw = lbn;
					vp->v_lasta = bp->b_blkno;
					return;
				}
			}
		}
		/*
		 * Consider beginning a cluster. If at end of file, make
		 * cluster as large as possible, otherwise find size of
		 * existing cluster.
		 */
		if ((vp->v_type == VREG) &&
		    ((u_quad_t) bp->b_offset + lblocksize) != filesize &&
		    (bp->b_blkno == bp->b_lblkno) &&
		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
		     bp->b_blkno == -1)) {
			bawrite(bp);
			vp->v_clen = 0;
			vp->v_lasta = bp->b_blkno;
			vp->v_cstart = lbn + 1;
			vp->v_lastw = lbn;
			return;
		}
		vp->v_clen = maxclen;
		if (!async && maxclen == 0) {	/* I/O not contiguous */
			vp->v_cstart = lbn + 1;
			bawrite(bp);
		} else {	/* Wait for rest of cluster */
			vp->v_cstart = lbn;
			bdwrite(bp);
		}
	} else if (lbn == vp->v_cstart + vp->v_clen) {
		/*
		 * At end of cluster, write it out if seqcount tells us we
		 * are operating sequentially, otherwise let the buf or
		 * update daemon handle it.
		 */
		bdwrite(bp);
		if (seqcount > 1) {
			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart,
			    vp->v_clen + 1, gbflags);
		}
		vp->v_clen = 0;
		vp->v_cstart = lbn + 1;
	} else if (vm_page_count_severe()) {
		/*
		 * We are low on memory, get it going NOW
		 */
		bawrite(bp);
	} else {
		/*
		 * In the middle of a cluster, so just delay the I/O for now.
		 */
		bdwrite(bp);
	}
	vp->v_lastw = lbn;
	vp->v_lasta = bp->b_blkno;
}


/*
 * This is an awful lot like cluster_rbuild...wish they could be combined.
 * Gather up to len delayed-write buffers starting at start_lbn and issue
 * them as one or more clustered writes, returning the total number of
 * bytes actually written.
 */
int
cluster_wbuild(struct vnode *vp, long size, daddr_t start_lbn, int len,
    int gbflags)
{
	struct buf *bp, *tbp;
	struct bufobj *bo;
	int i, j;
	int totalwritten = 0;
	int dbsize = btodb(size);

	if (!unmapped_buf_allowed)
		gbflags &= ~GB_UNMAPPED;

	bo = &vp->v_bufobj;
	while (len > 0) {
		/*
		 * If the buffer is not delayed-write (i.e. dirty), or it
		 * is delayed-write but either locked or inval, it cannot
		 * partake in the clustered write.
		 */
		BO_LOCK(bo);
		if ((tbp = gbincore(&vp->v_bufobj, start_lbn)) == NULL ||
		    (tbp->b_vflags & BV_BKGRDINPROG)) {
			BO_UNLOCK(bo);
			++start_lbn;
			--len;
			continue;
		}
		if (BUF_LOCK(tbp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, BO_LOCKPTR(bo))) {
			++start_lbn;
			--len;
			continue;
		}
		if ((tbp->b_flags & (B_INVAL | B_DELWRI)) != B_DELWRI) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		if (tbp->b_pin_count > 0) {
			BUF_UNLOCK(tbp);
			++start_lbn;
			--len;
			continue;
		}
		bremfree(tbp);
		tbp->b_flags &= ~B_DONE;

		/*
		 * Extra memory in the buffer, punt on this buffer.
		 * XXX we could handle this in most cases, but we would
		 * have to push the extra memory down to after our max
		 * possible cluster size and then potentially pull it back
		 * up if the cluster was terminated prematurely--too much
		 * hassle.
		 */
		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
		     (B_CLUSTEROK | B_VMIO)) ||
		  (tbp->b_bcount != tbp->b_bufsize) ||
		  (tbp->b_bcount != size) ||
		  (len == 1) ||
		  ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
			totalwritten += tbp->b_bufsize;
			bawrite(tbp);
			++start_lbn;
			--len;
			continue;
		}

		/*
		 * We got a pbuf to make the cluster in.
		 * so initialise it.
		 */
		TAILQ_INIT(&bp->b_cluster.cluster_head);
		bp->b_bcount = 0;
		bp->b_bufsize = 0;
		bp->b_npages = 0;
		if (tbp->b_wcred != NOCRED)
			bp->b_wcred = crhold(tbp->b_wcred);

		bp->b_blkno = tbp->b_blkno;
		bp->b_lblkno = tbp->b_lblkno;
		bp->b_offset = tbp->b_offset;

		/*
		 * We are synthesizing a buffer out of vm_page_t's, but
		 * if the block size is not page aligned then the starting
		 * address may not be either.  Inherit the b_data offset
		 * from the original buffer.
		 */
		if ((gbflags & GB_UNMAPPED) == 0 ||
		    (tbp->b_flags & B_VMIO) == 0) {
			bp->b_data = (char *)((vm_offset_t)bp->b_data |
			    ((vm_offset_t)tbp->b_data & PAGE_MASK));
		} else {
			bp->b_flags |= B_UNMAPPED;
			bp->b_data = unmapped_buf;
		}
		bp->b_flags |= B_CLUSTER | (tbp->b_flags & (B_VMIO |
		    B_NEEDCOMMIT));
		bp->b_iodone = cluster_callback;
		pbgetvp(vp, bp);
		/*
		 * From this location in the file, scan forward to see
		 * if there are buffers with adjacent data that need to
		 * be written as well.
		 */
		for (i = 0; i < len; ++i, ++start_lbn) {
			if (i != 0) { /* If not the first buffer */
				/*
				 * If the adjacent data is not even in core it
				 * can't need to be written.
				 */
				BO_LOCK(bo);
				if ((tbp = gbincore(bo, start_lbn)) == NULL ||
				    (tbp->b_vflags & BV_BKGRDINPROG)) {
					BO_UNLOCK(bo);
					break;
				}

				/*
				 * If it IS in core, but has different
				 * characteristics, or is locked (which
				 * means it could be undergoing a background
				 * I/O or be in a weird state), then don't
				 * cluster with it.
				 */
				if (BUF_LOCK(tbp,
				    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK,
				    BO_LOCKPTR(bo)))
					break;

				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
				    != (B_DELWRI | B_CLUSTEROK |
				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
				    tbp->b_wcred != bp->b_wcred) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Check that the combined cluster
				 * would make sense with regard to pages
				 * and would not be too large
				 */
				if ((tbp->b_bcount != size) ||
				  ((bp->b_blkno + (dbsize * i)) !=
				    tbp->b_blkno) ||
				  ((tbp->b_npages + bp->b_npages) >
				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Do not pull in pinned buffers.
				 */
				if (tbp->b_pin_count > 0) {
					BUF_UNLOCK(tbp);
					break;
				}

				/*
				 * Ok, it's passed all the tests,
				 * so remove it from the free list
				 * and mark it busy. We will use it.
				 */
				bremfree(tbp);
				tbp->b_flags &= ~B_DONE;
			} /* end of code for non-first buffers only */
			/*
			 * If the IO is via the VM then we do some
			 * special VM hackery (yuck).  Since the buffer's
			 * block size may not be page-aligned it is possible
			 * for a page to be shared between two buffers.  We
			 * have to get rid of the duplication when building
			 * the cluster.
			 */
			if (tbp->b_flags & B_VMIO) {
				vm_page_t m;

				VM_OBJECT_WLOCK(tbp->b_bufobj->bo_object);
				if (i != 0) { /* if not first buffer */
					for (j = 0; j < tbp->b_npages; j += 1) {
						m = tbp->b_pages[j];
						if (vm_page_xbusied(m)) {
							VM_OBJECT_WUNLOCK(
							    tbp->b_bufobj->bo_object);
							bqrelse(tbp);
							goto finishcluster;
						}
					}
				}
				for (j = 0; j < tbp->b_npages; j += 1) {
					m = tbp->b_pages[j];
					vm_page_sbusy(m);
					vm_object_pip_add(m->object, 1);
					if ((bp->b_npages == 0) ||
					  (bp->b_pages[bp->b_npages - 1] != m)) {
						bp->b_pages[bp->b_npages] = m;
						bp->b_npages++;
					}
				}
				VM_OBJECT_WUNLOCK(tbp->b_bufobj->bo_object);
			}
			bp->b_bcount += size;
			bp->b_bufsize += size;
			/*
			 * If any of the clustered buffers have their
			 * B_BARRIER flag set, transfer that request to
			 * the cluster.
			 */
			bp->b_flags |= (tbp->b_flags & B_BARRIER);
			tbp->b_flags &= ~(B_DONE | B_BARRIER);
			tbp->b_flags |= B_ASYNC;
			tbp->b_ioflags &= ~BIO_ERROR;
			tbp->b_iocmd = BIO_WRITE;
			bundirty(tbp);
			reassignbuf(tbp);		/* put on clean list */
			bufobj_wref(tbp->b_bufobj);
			BUF_KERNPROC(tbp);
			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
				tbp, b_cluster.cluster_entry);
		}
	finishcluster:
		if ((bp->b_flags & B_UNMAPPED) == 0) {
			pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
			    (vm_page_t *)bp->b_pages, bp->b_npages);
		}
		if (bp->b_bufsize > bp->b_kvasize)
			panic(
			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
			    bp->b_bufsize, bp->b_kvasize);
		bp->b_kvasize = bp->b_bufsize;
		totalwritten += bp->b_bufsize;
		bp->b_dirtyoff = 0;
		bp->b_dirtyend = bp->b_bufsize;
		bawrite(bp);

		len -= i;
	}
	return (totalwritten);
}

/*
 * Collect together all the buffers in a cluster.
 * Plus add one additional buffer.
 */
static struct cluster_save *
cluster_collectbufs(struct vnode *vp, struct buf *last_bp, int gbflags)
{
	struct cluster_save *buflist;
	struct buf *bp;
	daddr_t lbn;
	int i, len;

	len = vp->v_lastw - vp->v_cstart + 1;
	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
	    M_SEGMENT, M_WAITOK);
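	/*
	 * A single allocation holds the cluster_save header followed
	 * immediately by the array of child buffer pointers.
	 */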
	buflist->bs_nchildren = 0;
	buflist->bs_children = (struct buf **) (buflist + 1);
	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
		(void)bread_gb(vp, lbn, last_bp->b_bcount, NOCRED,
		    gbflags, &bp);
		buflist->bs_children[i] = bp;
		if (bp->b_blkno == bp->b_lblkno)
			VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
				NULL, NULL);
	}
	buflist->bs_children[i] = bp = last_bp;
	if (bp->b_blkno == bp->b_lblkno)
		VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
	buflist->bs_nchildren = i + 1;
	return (buflist);
}
1040