/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 * $FreeBSD: head/sys/nfsclient/nfs_bio.c 58934 2000-04-02 15:24:56Z phk $
 */


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>

static struct buf *nfs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size,
					struct proc *p));

extern int nfs_numasync;
extern int nfs_pbuf_freecnt;
extern struct nfsstats nfsstats;

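/*
 * This file implements NFS buffer-cache I/O: the VM pager entry points
 * (getpages/putpages), buffered read and write, cache-block allocation,
 * buffer invalidation, and the hand-off of async I/O to the nfsiods.
 */
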
/*
 * Vnode op for VM getpages.
 */
int
nfs_getpages(ap)
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_reqpage;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_page_t *pages;

	vp = ap->a_vp;
	p = curproc;				/* XXX */
	cred = curproc->p_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if (vp->v_object == NULL) {
		printf("nfs_getpages: called with non-merged cache vnode??\n");
		return VM_PAGER_ERROR;
	}

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, p);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */

	{
		vm_page_t m = pages[ap->a_reqpage];

		if (m->valid != 0) {
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
			for (i = 0; i < npages; ++i) {
				if (i != ap->a_reqpage)
					vnode_pager_freepage(pages[i]);
			}
			return(0);
		}
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

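	/*
	 * Build a single uio/iovec describing the mapped KVA region, so
	 * the whole run of pages can be filled by one read RPC.
	 */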
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = p;

	error = nfs_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &nfs_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("nfs_getpages: error %d\n", error);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vnode_pager_freepage(pages[i]);
		}
		return VM_PAGER_ERROR;
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;

	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		m->flags &= ~PG_ZERO;

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(m);
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_validclean(m, 0, size - toff);
			/* handled by vm_fault now	  */
			/* vm_page_zero_invalid(m, TRUE); */
		}

		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Empirical
			 * results show that deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->flags & PG_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vnode_pager_freepage(m);
			}
		}
	}
	return 0;
}

/*
 * Vnode op for VM putpages.
 */
int
nfs_putpages(ap)
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		vm_page_t *a_m;
		int a_count;
		int a_sync;
		int *a_rtvals;
		vm_ooffset_t a_offset;
	} */ *ap;
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	p = curproc;				/* XXX */
	cred = curproc->p_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, p);

	for (i = 0; i < npages; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}

	/*
	 * When putting pages, do not extend file past EOF.
	 */

	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&nfs_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);

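	/*
	 * As in nfs_getpages(), describe the mapped KVA region with a
	 * single uio/iovec so one write RPC covers all the pages.
	 */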
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = p;

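	/*
	 * Async puts may use NFSv3 UNSTABLE writes, which let the server
	 * reply before the data reaches stable storage (a later COMMIT
	 * makes it permanent); sync puts demand FILESYNC.
	 */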
	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSV3WRITE_UNSTABLE;
	else
	    iomode = NFSV3WRITE_FILESYNC;

	error = nfs_writerpc(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &nfs_pbuf_freecnt);

	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit)
			nfs_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}

/*
 * Vnode op for read using bio
 */
int
nfs_bioread(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize, i;
	struct buf *bp = 0, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	p = uio->uio_procp;

	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	biosize = vp->v_mount->mnt_stat.f_iosize;
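	/*
	 * The high 16 bits of ioflag carry the VFS sequential-access
	 * heuristic; scale it into units of this mount's buffer size
	 * (it is presumably expressed in BKVASIZE-sized chunks).
	 */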
	seqcount = (int)((off_t)(ioflag >> 16) * biosize / BKVASIZE);
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0) {
		if (np->n_flag & NMODIFIED) {
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR)
					nfs_invaldir(vp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}
	do {

	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
		    do {
			error = nqnfs_getlease(vp, ND_READ, cred, p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE) ||
			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR)
			    nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
			    return (error);
			np->n_brev = np->n_lrev;
		    }
		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
		    nfs_invaldir(vp);
		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
		    if (error)
			return (error);
		}
	    }
	    if (np->n_flag & NQNFSNONCACHE) {
		switch (vp->v_type) {
		case VREG:
			return (nfs_readrpc(vp, uio, cred));
		case VLNK:
			return (nfs_readlinkrpc(vp, uio, cred));
		case VDIR:
			break;
		default:
			printf(" NQNFSNONCACHE: type %x unexpected\n",
				vp->v_type);
		}
	    }
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;
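		/*
		 * Compute the logical block number and the offset within
		 * that block; the mask assumes biosize is a power of 2.
		 */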
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
			rabn = lbn + 1 + nra;
			if (!incore(vp, rabn)) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
			    if (!rabp)
				return (EINTR);
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, p)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/*
		 * Obtain the buffer cache block.  Figure out the buffer size
		 * when we are at EOF.  If we are modifying the size of the
		 * buffer based on an EOF condition we need to hold
		 * nfs_rslock() through obtaining the buffer to prevent
		 * a potential writer-appender from messing with n_size.
		 * Otherwise we may accidentally truncate the buffer and
		 * lose dirty data.
		 *
		 * Note that bcount is *not* DEV_BSIZE aligned.
		 */

again:
		bcount = biosize;
		if ((off_t)lbn * biosize >= np->n_size) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > np->n_size) {
			bcount = np->n_size - (off_t)lbn * biosize;
		}
		if (bcount != biosize) {
			switch(nfs_rslock(np, p)) {
			case ENOLCK:
				goto again;
				/* not reached */
			case EINTR:
			case ERESTART:
				return(EINTR);
				/* not reached */
			default:
				break;
			}
		}

		bp = nfs_getcacheblk(vp, lbn, bcount, p);

		if (bcount != biosize)
			nfs_rsunlock(np, p);
		if (!bp)
			return (EINTR);

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, p);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, p);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		nfsstats.biocache_readdirs++;
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, p);
		if (!bp)
		    return (EINTR);
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = nfs_doio(bp, cred, p);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			printf("got bad cookie vp %p bp %p\n", vp, bp);
			nfs_invaldir(vp);
			error = nfs_vinvalbuf(vp, 0, cred, p, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuch!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, p);
			    if (!bp)
				return (EINTR);
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = nfs_doio(bp, cred, p);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    !(np->n_flag & NQNFSNONCACHE) &&
		    !incore(vp, lbn + 1)) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, p);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (nfs_asyncio(rabp, cred, p)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n",vp->v_type);
		break;
	    }

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		/*
		 * Invalidate buffer if caching is disabled, forcing a
		 * re-read from the remote later.
		 */
		if (np->n_flag & NQNFSNONCACHE)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf(" nfs_bioread: type %x unexpected\n",vp->v_type);
	    }
	    brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0, iomode, must_commit;
	int haverslock = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0)
		(void)nfs_fsinfo(nmp, vp, cred, p);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
restart:
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred, p);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	/*
	 * We need to obtain the rslock if we intend to modify np->n_size
	 * in order to guarantee the append point with multiple contending
	 * writers, to guarantee that no other appenders modify n_size
	 * while we are trying to obtain a truncated buffer (i.e. to avoid
	 * accidentally truncating data written by another appender due to
	 * the race), and to ensure that the buffer is populated prior to
	 * our extending of the file.  We hold rslock through the entire
	 * operation.
	 *
	 * Note that we do not synchronize the case where someone truncates
	 * the file while we are appending to it because attempting to lock
	 * this case may deadlock other parts of the system unexpectedly.
	 */
	if ((ioflag & IO_APPEND) ||
	    uio->uio_offset + uio->uio_resid > np->n_size) {
		switch(nfs_rslock(np, p)) {
		case ENOLCK:
			goto restart;
			/* not reached */
		case EINTR:
		case ERESTART:
			return(EINTR);
			/* not reached */
		default:
			break;
		}
		haverslock = 1;
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		if (haverslock)
			nfs_rsunlock(np, p);
		return (EFBIG);
	}

	biosize = vp->v_mount->mnt_stat.f_iosize;

	do {
		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				break;
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					break;
				np->n_brev = np->n_lrev;
			}
		}
		if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
		    iomode = NFSV3WRITE_FILESYNC;
		    error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit);
		    if (must_commit)
			    nfs_clearcommit(vp->v_mount);
		    break;
		}
		nfsstats.biocache_writes++;
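		/*
		 * Compute the target block, the offset within it, and the
		 * span of this write; again, the mask assumes biosize is a
		 * power of 2.
		 */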
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */

		if (uio->uio_offset == np->n_size && n) {
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, p);

			if (bp != NULL) {
				long save;

				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}

			bp = nfs_getcacheblk(vp, lbn, bcount, p);

			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
		}

		if (!bp) {
			error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		if (on == 0 && n == bcount) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = nfs_doio(bp, cred, p);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (!bp) {
			error = EINTR;
			break;
		}
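		/*
		 * Cache the write credentials on the buffer so later
		 * delayed/async write RPCs are issued with them.
		 */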
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		np->n_flag |= NMODIFIED;

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (BUF_WRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		/*
		 * Check for valid write lease and get one as required.
		 * In case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				break;
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					break;
				np->n_brev = np->n_lrev;
				goto again;
			}
		}

		error = uiomove((char *)bp->b_data + on, n, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_validclean(bp, on, n);
		}

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = BUF_WRITE(bp);
			if (error)
				break;
			if (np->n_flag & NQNFSNONCACHE) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					break;
			}
		} else if ((n + on) == biosize &&
			(nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);

	if (haverslock)
		nfs_rsunlock(np, p);

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  nfs_doio() clears B_INVAL (and nfs_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

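	/*
	 * On interruptible mounts, ask getblk() to catch signals and poll
	 * for a pending interrupt between bounded sleeps; otherwise just
	 * block until the buffer is available.
	 */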
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0);
	}

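	/*
	 * For regular files, express the logical block number in
	 * DEV_BSIZE units in b_blkno; this assumes biosize is a
	 * multiple of DEV_BSIZE.
	 */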
	if (vp->v_type == VREG) {
		int biosize;

		biosize = mp->mnt_stat.f_iosize;
		bp->b_blkno = bn * (biosize / DEV_BSIZE);
	}
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

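	/*
	 * If the vnode is being cleaned (VXLOCK), there is nothing for us
	 * to flush; presumably its buffers are being reclaimed with it.
	 */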
	if (vp->v_flag & VXLOCK) {
		return (0);
	}

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: nfs_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, nfs_doio() *will*.
 */
int
nfs_asyncio(bp, cred, procp)
	register struct buf *bp;
	struct ucred *cred;
	struct proc *procp;
{
	struct nfsmount *nmp;
	int i;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error;

	/*
	 * If no async daemons then return EIO to force caller to run the rpc
	 * synchronously.
	 */
	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);

	/*
	 * Commits are usually short and sweet so let's save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 */
	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > nfs_numasync / 2)) {
		return(EIO);
	}

again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waking iod %d for mount %p\n",
				 i, nmp));
			nfs_iodwant[i] = (struct proc *)0;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: %d iods are already processing mount %p\n",
				 nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			NFS_DPF(ASYNCIO,
				("nfs_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
				       "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, procp))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0) {
				NFS_DPF(ASYNCIO,
					("nfs_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
				goto again;
			}
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			}
		}

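		/*
		 * Hand the buffer lock over to the kernel so the iod, not
		 * us, releases it on completion, then queue the buffer for
		 * an iod serving this mount.
		 */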
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("nfs_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, cr, p)
	struct buf *bp;
	struct ucred *cr;
	struct proc *p;
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("nfs_doio: bp %p already marked done", bp));

	/*
	 * Historically, paging was done with physio, but no more.
	 */
	if (bp->b_flags & B_PHYS) {
	    /*
	     * ...though reading /dev/drum still gets us here.
	     */
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    /* mapping was done by vmapbuf() */
	    io.iov_base = bp->b_data;
	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
	    if (bp->b_iocmd == BIO_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop, cr);
	    } else {
		int com;

		iomode = NFSV3WRITE_DATASYNC;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, cr, &iomode, &com);
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
140858345Sphk	} else if (bp->b_iocmd == BIO_READ) {
14093664Sphk	    io.iov_len = uiop->uio_resid = bp->b_bcount;
14103664Sphk	    io.iov_base = bp->b_data;
14111541Srgrimes	    uiop->uio_rw = UIO_READ;
14121541Srgrimes	    switch (vp->v_type) {
14131541Srgrimes	    case VREG:
14149336Sdfr		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
14151541Srgrimes		nfsstats.read_bios++;
14161541Srgrimes		error = nfs_readrpc(vp, uiop, cr);
14171541Srgrimes		if (!error) {
14181541Srgrimes		    if (uiop->uio_resid) {
14191541Srgrimes			/*
142046349Salc			 * If we had a short read with no error, we must have
142146349Salc			 * hit a file hole.  We should zero-fill the remainder.
142246349Salc			 * This can also occur if the server hits the file EOF.
142346349Salc			 *
142446349Salc			 * Holes used to be able to occur due to pending
142546349Salc			 * writes, but that is not possible any longer.
14261541Srgrimes			 */
142746349Salc			int nread = bp->b_bcount - uiop->uio_resid;
142846349Salc			int left  = bp->b_bcount - nread;
142946349Salc
143046349Salc			if (left > 0)
143146349Salc				bzero((char *)bp->b_data + nread, left);
143246349Salc			uiop->uio_resid = 0;
143346349Salc		    }
14341541Srgrimes		}
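		/*
		 * If the file is executing (VTEXT) and its copy on the
		 * server has changed, the in-core image is stale.  NFS
		 * cannot block server-side writes the way local ETXTBSY
		 * handling does, so the process is killed instead.
		 */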
		if (p && (vp->v_flag & VTEXT) &&
			(((nmp->nm_flag & NFSMNT_NQNFS) &&
			  NQNFS_CKINVALID(vp, np, ND_READ) &&
			  np->n_lrev != np->n_brev) ||
			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			  np->n_mtime != np->n_vattr.va_mtime.tv_sec))) {
			uprintf("Process killed due to text file modification\n");
			psignal(p, SIGKILL);
			PHOLD(p);
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, cr);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
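		/*
		 * Prefer the readdirplus RPC when the mount allows it;
		 * fall back to plain readdir if the server lacks support.
		 */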
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, cr);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, cr);
		/*
		 * End-of-directory sets B_INVAL but does not generate an
		 * error.
		 */
		if (error == 0 && uiop->uio_resid == bp->b_bcount)
			bp->b_flags |= B_INVAL;
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_ioflags |= BIO_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If we only need to commit, try to commit.
	     */
	    if (bp->b_flags & B_NEEDCOMMIT) {
		    int retv;
		    off_t off;

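		    /*
		     * Convert the buffer's dirty byte range into a file
		     * offset and length and push a commit RPC for just
		     * that region.
		     */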
		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
		    bp->b_flags |= B_WRITEINPROG;
		    retv = nfs_commit(
				bp->b_vp, off, bp->b_dirtyend - bp->b_dirtyoff,
				bp->b_wcred, p);
		    bp->b_flags &= ~B_WRITEINPROG;
		    if (retv == 0) {
			    bp->b_dirtyoff = bp->b_dirtyend = 0;
			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			    bp->b_resid = 0;
			    biodone(bp);
			    return (0);
		    }
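		    /*
		     * A stale write verifier means the server's verifier
		     * changed (typically a server reboot), so every
		     * uncommitted write on this mount must be resent,
		     * not just this one.
		     */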
		    if (retv == NFSERR_STALEWRITEVERF) {
			    nfs_clearcommit(bp->b_vp->v_mount);
		    }
	    }

	    /*
	     * Set up for the actual write.
	     */

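	    /*
	     * Clamp the dirty region so we never write past the file's
	     * current EOF.
	     */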
	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;

	    if (bp->b_dirtyend > bp->b_dirtyoff) {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;

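		/*
		 * Plain async writes may go out unstable and be committed
		 * later; everything else must be FILESYNC so the data is
		 * on stable storage when the RPC returns.
		 */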
		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
		    iomode = NFSV3WRITE_UNSTABLE;
		else
		    iomode = NFSV3WRITE_FILESYNC;

		bp->b_flags |= B_WRITEINPROG;
		error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);

		/*
		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
		 * to cluster the buffers needing commit.  This will allow
		 * the system to submit a single commit rpc for the whole
		 * cluster.  We can do this even if the buffer is not 100%
		 * dirty (relative to the NFS blocksize), which optimizes
		 * the append-to-file case.
		 *
		 * (When clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
		 * cleared because write clustering only works for commit
		 * rpc's, not for the data portion of the write.)
		 */

		if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		    bp->b_flags |= B_NEEDCOMMIT;
		    if (bp->b_dirtyoff == 0
			&& bp->b_dirtyend == bp->b_bcount)
			bp->b_flags |= B_CLUSTEROK;
		} else {
		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
		}
		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid
		 * and the write hasn't been pushed to the server yet,
		 * so we can't set BIO_ERROR; instead we report the
		 * interruption by setting B_EINTR.  For the B_ASYNC
		 * case, B_EINTR is not relevant, so the rpc attempt is
		 * essentially a noop.  For the case of a V3 write rpc
		 * not being committed to stable storage, the block is
		 * still dirty and requires either a commit rpc or another
		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
		 * the block is reused. This is indicated by setting
		 * the B_DELWRI and B_NEEDCOMMIT flags.
		 *
		 * If the buffer is marked B_PAGING, it does not reside on
		 * the vp's paging queues so we cannot call bdirty().  The
		 * bp in this case is not an NFS cache block so we should
		 * be safe. XXX
		 */
		if (error == EINTR
		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			int s;

			s = splbio();
			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
			if ((bp->b_flags & B_PAGING) == 0) {
			    bdirty(bp);
			    bp->b_flags &= ~B_DONE;
			}
			if (error && (bp->b_flags & B_ASYNC) == 0)
			    bp->b_flags |= B_EINTR;
			splx(s);
	    	} else {
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		    }
		    bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	    } else {
		bp->b_resid = 0;
		biodone(bp);
		return (0);
	    }
	}
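	/*
	 * Record what was not transferred.  nfs_writerpc() sets
	 * must_commit when it notices a server write verifier change,
	 * which invalidates every pending commit on the mount.
	 */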
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
	    nfs_clearcommit(vp->v_mount);
	biodone(bp);
	return (error);
}