vfs_cluster.c revision 111463
1/*-
2 * Copyright (c) 1993
3 *	The Regents of the University of California.  All rights reserved.
4 * Modifications/enhancements:
5 * 	Copyright (c) 1995 John S. Dyson.  All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 *    must display the following acknowledgement:
17 *	This product includes software developed by the University of
18 *	California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 *    may be used to endorse or promote products derived from this software
21 *    without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 *	@(#)vfs_cluster.c	8.7 (Berkeley) 2/13/94
36 * $FreeBSD: head/sys/kern/vfs_cluster.c 111463 2003-02-25 03:37:48Z jeff $
37 */
38
39#include "opt_debug_cluster.h"
40
41#include <sys/param.h>
42#include <sys/systm.h>
43#include <sys/stdint.h>
44#include <sys/kernel.h>
45#include <sys/proc.h>
46#include <sys/bio.h>
47#include <sys/buf.h>
48#include <sys/vnode.h>
49#include <sys/malloc.h>
50#include <sys/mount.h>
51#include <sys/resourcevar.h>
52#include <sys/vmmeter.h>
53#include <vm/vm.h>
54#include <vm/vm_object.h>
55#include <vm/vm_page.h>
56#include <sys/sysctl.h>
57
58#if defined(CLUSTERDEBUG)
59static int	rcluster = 0;
60SYSCTL_INT(_debug, OID_AUTO, rcluster, CTLFLAG_RW, &rcluster, 0,
61    "Debug VFS clustering code");
62#endif
63
64static MALLOC_DEFINE(M_SEGMENT, "cluster_save buffer", "cluster_save buffer");
65
66static struct cluster_save *
67	cluster_collectbufs(struct vnode *vp, struct buf *last_bp);
68static struct buf *
69	cluster_rbuild(struct vnode *vp, u_quad_t filesize, daddr_t lbn,
70			 daddr_t blkno, long size, int run, struct buf *fbp);
71
72static int write_behind = 1;
73SYSCTL_INT(_vfs, OID_AUTO, write_behind, CTLFLAG_RW, &write_behind, 0,
74    "Cluster write-behind; 0: disable, 1: enable, 2: backed off");
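/*
 * The knob above is exported as the vfs.write_behind sysctl (CTLFLAG_RW),
 * so the write-behind policy can be changed at run time, e.g.
 * "sysctl vfs.write_behind=0" to disable it.
 */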
75
76/* Page used to mark partially backed buffers */
77extern vm_page_t	bogus_page;
78
79/*
80 * Number of physical bufs (pbufs) this subsystem is allowed.
81 * Manipulated by vm_pager.c
82 */
83extern int cluster_pbuf_freecnt;
84
85/*
86 * Maximum number of blocks for read-ahead.
87 */
88#define MAXRA 32
89
90/*
91 * Read data to a buf, including read-ahead if we find this to be beneficial.
92 * cluster_read replaces bread.
93 */
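/*
 * A typical caller is a filesystem read routine that already knows the
 * logical block and how much of the request remains; a rough sketch
 * (ip, uio, lbn and seqcount are placeholders supplied by that caller):
 *
 *	error = cluster_read(vp, ip->i_size, lbn, size, NOCRED,
 *	    uio->uio_resid, seqcount, &bp);
 */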
94int
95cluster_read(vp, filesize, lblkno, size, cred, totread, seqcount, bpp)
96	struct vnode *vp;
97	u_quad_t filesize;
98	daddr_t lblkno;
99	long size;
100	struct ucred *cred;
101	long totread;
102	int seqcount;
103	struct buf **bpp;
104{
105	struct buf *bp, *rbp, *reqbp;
106	daddr_t blkno, origblkno;
107	int error, num_ra;
108	int i;
109	int maxra, racluster;
110	long origtotread;
111
112	error = 0;
113
114	/*
115	 * Try to limit the amount of read-ahead by a few
116	 * ad-hoc parameters.  This needs work!!!
117	 */
118	racluster = vp->v_mount->mnt_iosize_max / size;
119	maxra = 2 * racluster + (totread / size);
120	if (maxra > MAXRA)
121		maxra = MAXRA;
122	if (maxra > nbuf/8)
123		maxra = nbuf/8;
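	/*
	 * For example, with 16K blocks, a 64K mnt_iosize_max and a 128K
	 * request this yields racluster == 4 and maxra == 16 blocks
	 * (assuming nbuf/8 is larger); the MAXRA and nbuf/8 clamps only
	 * cap the window, they never raise it.
	 */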
124
125	/*
126	 * get the requested block
127	 */
128	*bpp = reqbp = bp = getblk(vp, lblkno, size, 0, 0);
129	origblkno = lblkno;
130	origtotread = totread;
131
132	/*
133	 * if it is in the cache, then check to see if the reads have been
134	 * sequential.  If they have, then try some read-ahead, otherwise
135	 * back-off on prospective read-aheads.
136	 */
137	if (bp->b_flags & B_CACHE) {
138		if (!seqcount) {
139			return 0;
140		} else if ((bp->b_flags & B_RAM) == 0) {
141			return 0;
142		} else {
143			int s;
144			struct buf *tbp;
145			bp->b_flags &= ~B_RAM;
146			/*
147			 * We do the spl here so that there is no window
148			 * between the gbincore() lookup and the B_RAM
149			 * update below.  We opt to keep the spl out of
150			 * the loop for efficiency.
151			 */
152			s = splbio();
153			VI_LOCK(vp);
154			for (i = 1; i < maxra; i++) {
155				/*
156				 * Stop if the buffer does not exist or it
157				 * is invalid (about to go away?)
158				 */
159				tbp = gbincore(vp, lblkno+i);
160				if (tbp == NULL || (tbp->b_flags & B_INVAL))
161					break;
162
163				/*
164				 * Set another read-ahead mark so we know
165				 * to check again.
166				 */
167				if (((i % racluster) == (racluster - 1)) ||
168					(i == (maxra - 1)))
169					tbp->b_flags |= B_RAM;
170			}
171			VI_UNLOCK(vp);
172			splx(s);
173			if (i >= maxra) {
174				return 0;
175			}
176			lblkno += i;
177		}
178		reqbp = bp = NULL;
179	} else {
180		off_t firstread = bp->b_offset;
181
182		KASSERT(bp->b_offset != NOOFFSET,
183		    ("cluster_read: no buffer offset"));
184		if (firstread + totread > filesize)
185			totread = filesize - firstread;
186		if (totread > size) {
187			int nblks = 0;
188			int ncontigafter;
189			while (totread > 0) {
190				nblks++;
191				totread -= size;
192			}
193			if (nblks == 1)
194				goto single_block_read;
195			if (nblks > racluster)
196				nblks = racluster;
197
198			error = VOP_BMAP(vp, lblkno, NULL,
199				&blkno, &ncontigafter, NULL);
200			if (error)
201				goto single_block_read;
202			if (blkno == -1)
203				goto single_block_read;
204			if (ncontigafter == 0)
205				goto single_block_read;
206			if (ncontigafter + 1 < nblks)
207				nblks = ncontigafter + 1;
208
209			bp = cluster_rbuild(vp, filesize, lblkno,
210				blkno, size, nblks, bp);
211			lblkno += (bp->b_bufsize / size);
212		} else {
213single_block_read:
214			/*
215			 * if it isn't in the cache, then get a chunk from
216			 * disk if sequential, otherwise just get the block.
217			 */
218			bp->b_flags |= B_RAM;
219			bp->b_iocmd = BIO_READ;
220			lblkno += 1;
221		}
222	}
223
224	/*
225	 * if we have been doing sequential I/O, then do some read-ahead
226	 */
227	rbp = NULL;
228	if (seqcount && (lblkno < (origblkno + seqcount))) {
229		/*
230		 * we now build the read-ahead buffer if it is desirable.
231		 */
232		if (((u_quad_t)(lblkno + 1) * size) <= filesize &&
233		    !(error = VOP_BMAP(vp, lblkno, NULL, &blkno, &num_ra, NULL)) &&
234		    blkno != -1) {
235			int nblksread;
236			int ntoread = num_ra + 1;
237			nblksread = (origtotread + size - 1) / size;
238			if (seqcount < nblksread)
239				seqcount = nblksread;
240			if (seqcount < ntoread)
241				ntoread = seqcount;
242			if (num_ra) {
243				rbp = cluster_rbuild(vp, filesize, lblkno,
244					blkno, size, ntoread, NULL);
245			} else {
246				rbp = getblk(vp, lblkno, size, 0, 0);
247				rbp->b_flags |= B_ASYNC | B_RAM;
248				rbp->b_iocmd = BIO_READ;
249				rbp->b_blkno = blkno;
250			}
251		}
252	}
253
254	/*
255	 * handle the synchronous read
256	 */
257	if (bp) {
258#if defined(CLUSTERDEBUG)
259		if (rcluster)
260			printf("S(%ld,%ld,%d) ",
261			    (long)bp->b_lblkno, bp->b_bcount, seqcount);
262#endif
263		if ((bp->b_flags & B_CLUSTER) == 0) {
264			vfs_busy_pages(bp, 0);
265		}
266		bp->b_flags &= ~B_INVAL;
267		bp->b_ioflags &= ~BIO_ERROR;
268		if ((bp->b_flags & B_ASYNC) || bp->b_iodone != NULL)
269			BUF_KERNPROC(bp);
270		error = VOP_STRATEGY(vp, bp);
271		curproc->p_stats->p_ru.ru_inblock++;
272	}
273
274	/*
275	 * and if we have read-aheads, do them too
276	 */
277	if (rbp) {
278		if (error) {
279			rbp->b_flags &= ~B_ASYNC;
280			brelse(rbp);
281		} else if (rbp->b_flags & B_CACHE) {
282			rbp->b_flags &= ~B_ASYNC;
283			bqrelse(rbp);
284		} else {
285#if defined(CLUSTERDEBUG)
286			if (rcluster) {
287				if (bp)
288					printf("A+");
289				else
290					printf("A");
291				printf("(%jd,%jd,%jd,%jd) ",
292				    (intmax_t)rbp->b_lblkno,
293				    (intmax_t)rbp->b_bcount,
294				    (intmax_t)(rbp->b_lblkno - origblkno),
295				    (intmax_t)seqcount);
296			}
297#endif
298
299			if ((rbp->b_flags & B_CLUSTER) == 0) {
300				vfs_busy_pages(rbp, 0);
301			}
302			rbp->b_flags &= ~B_INVAL;
303			rbp->b_ioflags &= ~BIO_ERROR;
304			if ((rbp->b_flags & B_ASYNC) || rbp->b_iodone != NULL)
305				BUF_KERNPROC(rbp);
306			(void) VOP_STRATEGY(vp, rbp);
307			curproc->p_stats->p_ru.ru_inblock++;
308		}
309	}
310	if (reqbp)
311		return (bufwait(reqbp));
312	else
313		return (error);
314}
315
316/*
317 * If blocks are contiguous on disk, use this to provide clustered
318 * read ahead.  We will read as many blocks as possible sequentially
319 * and then parcel them up into logical blocks in the buffer hash table.
320 */
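/*
 * For example, with 8K blocks and run == 4, the pbuf built below maps
 * the pages backing lbn..lbn+3 and issues a single 32K read starting at
 * blkno; component buffers that turn out to be cached, locked or
 * non-VMIO terminate the run early (a sketch of the loop below, not a
 * guarantee of cluster size).
 */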
321static struct buf *
322cluster_rbuild(vp, filesize, lbn, blkno, size, run, fbp)
323	struct vnode *vp;
324	u_quad_t filesize;
325	daddr_t lbn;
326	daddr_t blkno;
327	long size;
328	int run;
329	struct buf *fbp;
330{
331	struct buf *bp, *tbp;
332	daddr_t bn;
333	int i, inc, j;
334
335	GIANT_REQUIRED;
336
337	KASSERT(size == vp->v_mount->mnt_stat.f_iosize,
338	    ("cluster_rbuild: size %ld != f_iosize %ld\n",
339	    size, vp->v_mount->mnt_stat.f_iosize));
340
341	/*
342	 * avoid a division
343	 */
344	while ((u_quad_t) size * (lbn + run) > filesize) {
345		--run;
346	}
347
348	if (fbp) {
349		tbp = fbp;
350		tbp->b_iocmd = BIO_READ;
351	} else {
352		tbp = getblk(vp, lbn, size, 0, 0);
353		if (tbp->b_flags & B_CACHE)
354			return tbp;
355		tbp->b_flags |= B_ASYNC | B_RAM;
356		tbp->b_iocmd = BIO_READ;
357	}
358
359	tbp->b_blkno = blkno;
360	if ((tbp->b_flags & B_MALLOC) ||
361	    ((tbp->b_flags & B_VMIO) == 0) || (run <= 1))
362		return tbp;
363
364	bp = trypbuf(&cluster_pbuf_freecnt);
365	if (bp == NULL)
366		return tbp;
367
368	/*
369	 * We are synthesizing a buffer out of vm_page_t's, but
370	 * if the block size is not page aligned then the starting
371	 * address may not be either.  Inherit the b_data offset
372	 * from the original buffer.
373	 */
374	bp->b_data = (char *)((vm_offset_t)bp->b_data |
375	    ((vm_offset_t)tbp->b_data & PAGE_MASK));
376	bp->b_flags = B_ASYNC | B_CLUSTER | B_VMIO;
377	bp->b_iocmd = BIO_READ;
378	bp->b_iodone = cluster_callback;
379	bp->b_blkno = blkno;
380	bp->b_lblkno = lbn;
381	bp->b_offset = tbp->b_offset;
382	KASSERT(bp->b_offset != NOOFFSET, ("cluster_rbuild: no buffer offset"));
383	pbgetvp(vp, bp);
384
385	TAILQ_INIT(&bp->b_cluster.cluster_head);
386
387	bp->b_bcount = 0;
388	bp->b_bufsize = 0;
389	bp->b_npages = 0;
390
391	inc = btodb(size);
392	for (bn = blkno, i = 0; i < run; ++i, bn += inc) {
393		if (i != 0) {
394			if ((bp->b_npages * PAGE_SIZE) +
395			    round_page(size) > vp->v_mount->mnt_iosize_max) {
396				break;
397			}
398
399			/*
400			 * Shortcut some checks and try to avoid buffers that
401			 * would block in the lock.  The same checks have to
402			 * be made again after we officially get the buffer.
403			 */
404			if ((tbp = incore(vp, lbn + i)) != NULL &&
405			    (tbp->b_flags & B_INVAL) == 0) {
406				if (BUF_LOCK(tbp,
407				    LK_EXCLUSIVE | LK_NOWAIT, NULL))
408					break;
409				BUF_UNLOCK(tbp);
410
411				for (j = 0; j < tbp->b_npages; j++) {
412					if (tbp->b_pages[j]->valid)
413						break;
414				}
415
416				if (j != tbp->b_npages)
417					break;
418
419				if (tbp->b_bcount != size)
420					break;
421			}
422
423			tbp = getblk(vp, lbn + i, size, 0, 0);
424
425			/*
426			 * Stop scanning if the buffer is fully valid
427			 * (marked B_CACHE), or locked (may be doing a
428			 * background write), or if the buffer is not
429			 * VMIO backed.  The clustering code can only deal
430			 * with VMIO-backed buffers.
431			 */
432			if ((tbp->b_flags & (B_CACHE|B_LOCKED)) ||
433				(tbp->b_flags & B_VMIO) == 0) {
434				bqrelse(tbp);
435				break;
436			}
437
438			/*
439			 * The buffer must be completely invalid in order to
440			 * take part in the cluster.  If it is partially valid
441			 * then we stop.
442			 */
443			for (j = 0; j < tbp->b_npages; j++) {
444				if (tbp->b_pages[j]->valid)
445					break;
446			}
447			if (j != tbp->b_npages) {
448				bqrelse(tbp);
449				break;
450			}
451
452			/*
453			 * Set a read-ahead mark as appropriate
454			 */
455			if ((fbp && (i == 1)) || (i == (run - 1)))
456				tbp->b_flags |= B_RAM;
457
458			/*
459			 * Set the buffer up for an async read (XXX should
460			 * we do this only if we do not wind up brelse()ing?).
461			 * Set the block number if it isn't set, otherwise
462			 * if it is make sure it matches the block number we
463			 * expect.
464			 */
465			tbp->b_flags |= B_ASYNC;
466			tbp->b_iocmd = BIO_READ;
467			if (tbp->b_blkno == tbp->b_lblkno) {
468				tbp->b_blkno = bn;
469			} else if (tbp->b_blkno != bn) {
470				brelse(tbp);
471				break;
472			}
473		}
474		/*
475		 * XXX fbp from caller may not be B_ASYNC, but we are going
476		 * to biodone() it in cluster_callback() anyway
477		 */
478		BUF_KERNPROC(tbp);
479		TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
480			tbp, b_cluster.cluster_entry);
481		vm_page_lock_queues();
482		for (j = 0; j < tbp->b_npages; j += 1) {
483			vm_page_t m;
484			m = tbp->b_pages[j];
485			vm_page_io_start(m);
486			vm_object_pip_add(m->object, 1);
487			if ((bp->b_npages == 0) ||
488				(bp->b_pages[bp->b_npages-1] != m)) {
489				bp->b_pages[bp->b_npages] = m;
490				bp->b_npages++;
491			}
492			if ((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL)
493				tbp->b_pages[j] = bogus_page;
494		}
495		vm_page_unlock_queues();
496		/*
497		 * XXX shouldn't this be += size for both, like in
498		 * cluster_wbuild()?
499		 *
500		 * Don't inherit tbp->b_bufsize as it may be larger due to
501		 * a non-page-aligned size.  Instead just aggregate using
502		 * 'size'.
503		 */
504		if (tbp->b_bcount != size)
505			printf("warning: tbp->b_bcount wrong %ld vs %ld\n", tbp->b_bcount, size);
506		if (tbp->b_bufsize != size)
507			printf("warning: tbp->b_bufsize wrong %ld vs %ld\n", tbp->b_bufsize, size);
508		bp->b_bcount += size;
509		bp->b_bufsize += size;
510	}
511
512	/*
513	 * Fully valid pages in the cluster are already good and do not need
514	 * to be re-read from disk.  Replace the page with bogus_page
515	 */
516	for (j = 0; j < bp->b_npages; j++) {
517		if ((bp->b_pages[j]->valid & VM_PAGE_BITS_ALL) ==
518		    VM_PAGE_BITS_ALL) {
519			bp->b_pages[j] = bogus_page;
520		}
521	}
522	if (bp->b_bufsize > bp->b_kvasize)
523		panic("cluster_rbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
524		    bp->b_bufsize, bp->b_kvasize);
525	bp->b_kvasize = bp->b_bufsize;
526
527	pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
528		(vm_page_t *)bp->b_pages, bp->b_npages);
529	return (bp);
530}
531
532/*
533 * Clean up after a clustered read or write.
534 * This is complicated by the fact that any of the buffers might have
535 * extra memory (if there were no empty buffer headers at allocbuf time)
536 * that we will need to shift around.
537 */
538void
539cluster_callback(bp)
540	struct buf *bp;
541{
542	struct buf *nbp, *tbp;
543	int error = 0;
544
545	GIANT_REQUIRED;
546
547	/*
548	 * Must propogate errors to all the components.
549	 * Must propagate errors to all the components.
550	if (bp->b_ioflags & BIO_ERROR)
551		error = bp->b_error;
552
553	pmap_qremove(trunc_page((vm_offset_t) bp->b_data), bp->b_npages);
554	/*
555	 * Move memory from the large cluster buffer into the component
556	 * buffers and mark IO as done on these.
557	 */
558	for (tbp = TAILQ_FIRST(&bp->b_cluster.cluster_head);
559		tbp; tbp = nbp) {
560		nbp = TAILQ_NEXT(&tbp->b_cluster, cluster_entry);
561		if (error) {
562			tbp->b_ioflags |= BIO_ERROR;
563			tbp->b_error = error;
564		} else {
565			tbp->b_dirtyoff = tbp->b_dirtyend = 0;
566			tbp->b_flags &= ~B_INVAL;
567			tbp->b_ioflags &= ~BIO_ERROR;
568			/*
569			 * XXX the bdwrite()/bqrelse() issued during
570			 * cluster building clears B_RELBUF (see bqrelse()
571			 * comment).  If direct I/O was specified, we have
572			 * to restore it here to allow the buffer and VM
573			 * to be freed.
574			 */
575			if (tbp->b_flags & B_DIRECT)
576				tbp->b_flags |= B_RELBUF;
577		}
578		bufdone(tbp);
579	}
580	relpbuf(bp, &cluster_pbuf_freecnt);
581}
582
583/*
584 *	cluster_wbuild_wb:
585 *
586 *	Implement the write-behind policy for cluster_wbuild().
587 *
588 *		write_behind = 0	write behind disabled
589 *		write_behind = 1	write behind normal (default)
590 *		write_behind = 2	write behind backed-off
591 */
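/*
 * In the backed-off case the requested range is shifted back by len
 * blocks before calling cluster_wbuild(), so it is the cluster just
 * behind the one currently being assembled that gets pushed out.
 */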
592
593static __inline int
594cluster_wbuild_wb(struct vnode *vp, long size, daddr_t start_lbn, int len)
595{
596	int r = 0;
597
598	switch(write_behind) {
599	case 2:
600		if (start_lbn < len)
601			break;
602		start_lbn -= len;
603		/* FALLTHROUGH */
604	case 1:
605		r = cluster_wbuild(vp, size, start_lbn, len);
606		/* FALLTHROUGH */
607	default:
609		break;
610	}
611	return(r);
612}
613
614/*
615 * Do clustered write for FFS.
616 *
617 *	Four cases:
618 *	1. Write is not sequential (write asynchronously)
619 *	Write is sequential:
620 *	2.	beginning of cluster - begin cluster
621 *	3.	middle of a cluster - add to cluster
622 *	4.	end of a cluster - asynchronously write cluster
623 */
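/*
 * A filesystem write path typically hands each fully written block to
 * this routine instead of calling bdwrite()/bawrite() itself; a rough
 * sketch (ip and seqcount are placeholders supplied by the caller):
 *
 *	cluster_write(bp, ip->i_size, seqcount);
 */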
624void
625cluster_write(bp, filesize, seqcount)
626	struct buf *bp;
627	u_quad_t filesize;
628	int seqcount;
629{
630	struct vnode *vp;
631	daddr_t lbn;
632	int maxclen, cursize;
633	int lblocksize;
634	int async;
635
636	vp = bp->b_vp;
637	if (vp->v_type == VREG) {
638		async = vp->v_mount->mnt_flag & MNT_ASYNC;
639		lblocksize = vp->v_mount->mnt_stat.f_iosize;
640	} else {
641		async = 0;
642		lblocksize = bp->b_bufsize;
643	}
644	lbn = bp->b_lblkno;
645	KASSERT(bp->b_offset != NOOFFSET, ("cluster_write: no buffer offset"));
646
647	/* Initialize vnode to beginning of file. */
648	if (lbn == 0)
649		vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
650
651	if (vp->v_clen == 0 || lbn != vp->v_lastw + 1 ||
652	    (bp->b_blkno != vp->v_lasta + btodb(lblocksize))) {
653		maxclen = vp->v_mount->mnt_iosize_max / lblocksize - 1;
654		if (vp->v_clen != 0) {
655			/*
656			 * Next block is not sequential.
657			 *
658			 * If we are not writing at end of file, the process
659			 * seeked to another point in the file since its last
660			 * write, or we have reached our maximum cluster size,
661			 * then push the previous cluster. Otherwise try
662			 * reallocating to make it sequential.
663			 *
664			 * Change to algorithm: only push previous cluster if
665			 * it was sequential from the point of view of the
666			 * seqcount heuristic, otherwise leave the buffer
667			 * intact so we can potentially optimize the I/O
668			 * later on in the buf_daemon or update daemon
669			 * flush.
670			 */
671			cursize = vp->v_lastw - vp->v_cstart + 1;
672			if (((u_quad_t) bp->b_offset + lblocksize) != filesize ||
673			    lbn != vp->v_lastw + 1 || vp->v_clen <= cursize) {
674				if (!async && seqcount > 0) {
675					cluster_wbuild_wb(vp, lblocksize,
676						vp->v_cstart, cursize);
677				}
678			} else {
679				struct buf **bpp, **endbp;
680				struct cluster_save *buflist;
681
682				buflist = cluster_collectbufs(vp, bp);
683				endbp = &buflist->bs_children
684				    [buflist->bs_nchildren - 1];
685				if (VOP_REALLOCBLKS(vp, buflist)) {
686					/*
687					 * Failed, push the previous cluster
688					 * if *really* writing sequentially
689					 * in the logical file (seqcount > 1),
690					 * otherwise delay it in the hopes that
691					 * the low level disk driver can
692					 * optimize the write ordering.
693					 */
694					for (bpp = buflist->bs_children;
695					     bpp < endbp; bpp++)
696						brelse(*bpp);
697					free(buflist, M_SEGMENT);
698					if (seqcount > 1) {
699						cluster_wbuild_wb(vp,
700						    lblocksize, vp->v_cstart,
701						    cursize);
702					}
703				} else {
704					/*
705					 * Succeeded, keep building cluster.
706					 */
707					for (bpp = buflist->bs_children;
708					     bpp <= endbp; bpp++)
709						bdwrite(*bpp);
710					free(buflist, M_SEGMENT);
711					vp->v_lastw = lbn;
712					vp->v_lasta = bp->b_blkno;
713					return;
714				}
715			}
716		}
717		/*
718		 * Consider beginning a cluster. If at end of file, make
719		 * cluster as large as possible, otherwise find size of
720		 * existing cluster.
721		 */
722		if ((vp->v_type == VREG) &&
723			((u_quad_t) bp->b_offset + lblocksize) != filesize &&
724		    (bp->b_blkno == bp->b_lblkno) &&
725		    (VOP_BMAP(vp, lbn, NULL, &bp->b_blkno, &maxclen, NULL) ||
726		     bp->b_blkno == -1)) {
727			bawrite(bp);
728			vp->v_clen = 0;
729			vp->v_lasta = bp->b_blkno;
730			vp->v_cstart = lbn + 1;
731			vp->v_lastw = lbn;
732			return;
733		}
734		vp->v_clen = maxclen;
735		if (!async && maxclen == 0) {	/* I/O not contiguous */
736			vp->v_cstart = lbn + 1;
737			bawrite(bp);
738		} else {	/* Wait for rest of cluster */
739			vp->v_cstart = lbn;
740			bdwrite(bp);
741		}
742	} else if (lbn == vp->v_cstart + vp->v_clen) {
743		/*
744		 * At end of cluster, write it out if seqcount tells us we
745		 * are operating sequentially, otherwise let the buf or
746		 * update daemon handle it.
747		 */
748		bdwrite(bp);
749		if (seqcount > 1)
750			cluster_wbuild_wb(vp, lblocksize, vp->v_cstart, vp->v_clen + 1);
751		vp->v_clen = 0;
752		vp->v_cstart = lbn + 1;
753	} else if (vm_page_count_severe()) {
754		/*
755		 * We are low on memory, get it going NOW
756		 */
757		bawrite(bp);
758	} else {
759		/*
760		 * In the middle of a cluster, so just delay the I/O for now.
761		 */
762		bdwrite(bp);
763	}
764	vp->v_lastw = lbn;
765	vp->v_lasta = bp->b_blkno;
766}
767
768
769/*
770 * This is an awful lot like cluster_rbuild...wish they could be combined.
771 * Scan forward from start_lbn for up to len delayed-write buffers,
772 * combining runs that are contiguous on disk into clustered writes
773 * of 'size'-byte blocks; returns the number of bytes pushed.
774 */
775int
776cluster_wbuild(vp, size, start_lbn, len)
777	struct vnode *vp;
778	long size;
779	daddr_t start_lbn;
780	int len;
781{
782	struct buf *bp, *tbp;
783	int i, j, s;
784	int totalwritten = 0;
785	int dbsize = btodb(size);
786
787	GIANT_REQUIRED;
788
789	while (len > 0) {
790		s = splbio();
791		/*
792		 * If the buffer is not delayed-write (i.e. dirty), or it
793		 * is delayed-write but either locked or inval, it cannot
794		 * partake in the clustered write.
795		 */
796		if (((tbp = incore(vp, start_lbn)) == NULL) ||
797		  ((tbp->b_flags & (B_LOCKED | B_INVAL | B_DELWRI)) != B_DELWRI) ||
798		  BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT, NULL)) {
799			++start_lbn;
800			--len;
801			splx(s);
802			continue;
803		}
804		bremfree(tbp);
805		tbp->b_flags &= ~B_DONE;
806		splx(s);
807
808		/*
809		 * Extra memory in the buffer, punt on this buffer.
810		 * XXX we could handle this in most cases, but we would
811		 * have to push the extra memory down to after our max
812		 * possible cluster size and then potentially pull it back
813		 * up if the cluster was terminated prematurely--too much
814		 * hassle.
815		 */
816		if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) !=
817		     (B_CLUSTEROK | B_VMIO)) ||
818		  (tbp->b_bcount != tbp->b_bufsize) ||
819		  (tbp->b_bcount != size) ||
820		  (len == 1) ||
821		  ((bp = getpbuf(&cluster_pbuf_freecnt)) == NULL)) {
822			totalwritten += tbp->b_bufsize;
823			bawrite(tbp);
824			++start_lbn;
825			--len;
826			continue;
827		}
828
829		/*
830		 * We got a pbuf to make the cluster in,
831		 * so initialize it.
832		 */
833		TAILQ_INIT(&bp->b_cluster.cluster_head);
834		bp->b_bcount = 0;
835		bp->b_magic = tbp->b_magic;
836		bp->b_op = tbp->b_op;
837		bp->b_bufsize = 0;
838		bp->b_npages = 0;
839		if (tbp->b_wcred != NOCRED)
840			bp->b_wcred = crhold(tbp->b_wcred);
841
842		bp->b_blkno = tbp->b_blkno;
843		bp->b_lblkno = tbp->b_lblkno;
844		bp->b_offset = tbp->b_offset;
845
846		/*
847		 * We are synthesizing a buffer out of vm_page_t's, but
848		 * if the block size is not page aligned then the starting
849		 * address may not be either.  Inherit the b_data offset
850		 * from the original buffer.
851		 */
852		bp->b_data = (char *)((vm_offset_t)bp->b_data |
853		    ((vm_offset_t)tbp->b_data & PAGE_MASK));
854		bp->b_flags |= B_CLUSTER |
855				(tbp->b_flags & (B_VMIO | B_NEEDCOMMIT | B_NOWDRAIN));
856		bp->b_iodone = cluster_callback;
857		pbgetvp(vp, bp);
858		/*
859		 * From this location in the file, scan forward to see
860		 * if there are buffers with adjacent data that need to
861		 * be written as well.
862		 */
863		for (i = 0; i < len; ++i, ++start_lbn) {
864			if (i != 0) { /* If not the first buffer */
865				s = splbio();
866				/*
867				 * If the adjacent data is not even in core it
868				 * can't need to be written.
869				 */
870				if ((tbp = incore(vp, start_lbn)) == NULL) {
871					splx(s);
872					break;
873				}
874
875				/*
876				 * If it IS in core, but has different
877				 * characteristics, or is locked (which
878				 * means it could be undergoing a background
879				 * I/O or be in a weird state), then don't
880				 * cluster with it.
881				 */
882				if ((tbp->b_flags & (B_VMIO | B_CLUSTEROK |
883				    B_INVAL | B_DELWRI | B_NEEDCOMMIT))
884				  != (B_DELWRI | B_CLUSTEROK |
885				    (bp->b_flags & (B_VMIO | B_NEEDCOMMIT))) ||
886				    (tbp->b_flags & B_LOCKED) ||
887				    tbp->b_wcred != bp->b_wcred ||
888				    BUF_LOCK(tbp, LK_EXCLUSIVE | LK_NOWAIT,
889				    NULL)) {
890					splx(s);
891					break;
892				}
893
894				/*
895				 * Check that the combined cluster
896				 * would make sense with regard to pages
897				 * and would not be too large
898				 */
899				if ((tbp->b_bcount != size) ||
900				  ((bp->b_blkno + (dbsize * i)) !=
901				    tbp->b_blkno) ||
902				  ((tbp->b_npages + bp->b_npages) >
903				    (vp->v_mount->mnt_iosize_max / PAGE_SIZE))) {
904					BUF_UNLOCK(tbp);
905					splx(s);
906					break;
907				}
908				/*
909				 * Ok, it's passed all the tests,
910				 * so remove it from the free list
911				 * and mark it busy. We will use it.
912				 */
913				bremfree(tbp);
914				tbp->b_flags &= ~B_DONE;
915				splx(s);
916			} /* end of code for non-first buffers only */
917			/* check for latent dependencies to be handled */
918			if ((LIST_FIRST(&tbp->b_dep)) != NULL)
919				buf_start(tbp);
920			/*
921			 * If the IO is via the VM then we do some
922			 * special VM hackery (yuck).  Since the buffer's
923			 * block size may not be page-aligned it is possible
924			 * for a page to be shared between two buffers.  We
925			 * have to get rid of the duplication when building
926			 * the cluster.
927			 */
928			if (tbp->b_flags & B_VMIO) {
929				vm_page_t m;
930
931				if (i != 0) { /* if not first buffer */
932					for (j = 0; j < tbp->b_npages; j += 1) {
933						m = tbp->b_pages[j];
934						if (m->flags & PG_BUSY) {
935							bqrelse(tbp);
936							goto finishcluster;
937						}
938					}
939				}
940				vm_page_lock_queues();
941				for (j = 0; j < tbp->b_npages; j += 1) {
942					m = tbp->b_pages[j];
943					vm_page_io_start(m);
944					vm_object_pip_add(m->object, 1);
945					if ((bp->b_npages == 0) ||
946					  (bp->b_pages[bp->b_npages - 1] != m)) {
947						bp->b_pages[bp->b_npages] = m;
948						bp->b_npages++;
949					}
950				}
951				vm_page_unlock_queues();
952			}
953			bp->b_bcount += size;
954			bp->b_bufsize += size;
955
956			s = splbio();
957			bundirty(tbp);
958			tbp->b_flags &= ~B_DONE;
959			tbp->b_ioflags &= ~BIO_ERROR;
960			tbp->b_flags |= B_ASYNC;
961			tbp->b_iocmd = BIO_WRITE;
962			reassignbuf(tbp, tbp->b_vp);	/* put on clean list */
963			VI_LOCK(tbp->b_vp);
964			++tbp->b_vp->v_numoutput;
965			VI_UNLOCK(tbp->b_vp);
966			splx(s);
967			BUF_KERNPROC(tbp);
968			TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
969				tbp, b_cluster.cluster_entry);
970		}
971	finishcluster:
972		pmap_qenter(trunc_page((vm_offset_t) bp->b_data),
973			(vm_page_t *) bp->b_pages, bp->b_npages);
974		if (bp->b_bufsize > bp->b_kvasize)
975			panic(
976			    "cluster_wbuild: b_bufsize(%ld) > b_kvasize(%d)\n",
977			    bp->b_bufsize, bp->b_kvasize);
978		bp->b_kvasize = bp->b_bufsize;
979		totalwritten += bp->b_bufsize;
980		bp->b_dirtyoff = 0;
981		bp->b_dirtyend = bp->b_bufsize;
982		bawrite(bp);
983
984		len -= i;
985	}
986	return totalwritten;
987}
988
989/*
990 * Collect all the buffers in a cluster,
991 * plus add the final buffer (last_bp) passed by the caller.
992 */
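/*
 * The cluster_save header and its bs_children pointer array come from a
 * single malloc() below, laid out as:
 *
 *	[ struct cluster_save | struct buf *children[len + 1] ]
 */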
993static struct cluster_save *
994cluster_collectbufs(vp, last_bp)
995	struct vnode *vp;
996	struct buf *last_bp;
997{
998	struct cluster_save *buflist;
999	struct buf *bp;
1000	daddr_t lbn;
1001	int i, len;
1002
1003	len = vp->v_lastw - vp->v_cstart + 1;
1004	buflist = malloc(sizeof(struct buf *) * (len + 1) + sizeof(*buflist),
1005	    M_SEGMENT, M_WAITOK);
1006	buflist->bs_nchildren = 0;
1007	buflist->bs_children = (struct buf **) (buflist + 1);
1008	for (lbn = vp->v_cstart, i = 0; i < len; lbn++, i++) {
1009		(void) bread(vp, lbn, last_bp->b_bcount, NOCRED, &bp);
1010		buflist->bs_children[i] = bp;
1011		if (bp->b_blkno == bp->b_lblkno)
1012			VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
1013				NULL, NULL);
1014	}
1015	buflist->bs_children[i] = bp = last_bp;
1016	if (bp->b_blkno == bp->b_lblkno)
1017		VOP_BMAP(bp->b_vp, bp->b_lblkno, NULL, &bp->b_blkno,
1018			NULL, NULL);
1019	buflist->bs_nchildren = i + 1;
1020	return (buflist);
1021}
1022