nfs_clbio.c revision 194425
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 194425 2009-06-18 05:56:24Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern struct proc *ncl_iodwant[NFS_MAXRAHEAD];
extern struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD];
extern int newnfs_directio_enable;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Any signal that can interrupt an NFS operation in an intr mount
 * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
 */
static int nfs_sig_set[] = {
	SIGINT,
	SIGTERM,
	SIGHUP,
	SIGKILL,
	SIGSTOP,
	SIGQUIT
};

#ifdef notnow
/*
 * Check to see if one of the signals in our subset is pending on
 * the process (in an intr mount).
 */
int
ncl_sig_pending(sigset_t set)
{
	int i;

	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
		if (SIGISMEMBER(set, nfs_sig_set[i]))
			return (1);
	return (0);
}
#endif

/*
 * The set/restore sigmask functions are used to (temporarily) overwrite
 * the process p_sigmask during an RPC call (for example). These are also
 * used in other places in the NFS client that might tsleep().
 */
static void
ncl_set_sigmask(struct thread *td, sigset_t *oldset)
{
	sigset_t newset;
	int i;
	struct proc *p;

	SIGFILLSET(newset);
	if (td == NULL)
		td = curthread; /* XXX */
	p = td->td_proc;
	/* Remove the NFS set of signals from newset */
	PROC_LOCK(p);
	mtx_lock(&p->p_sigacts->ps_mtx);
	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
		/*
		 * But make sure we leave the ones already masked
		 * by the process, i.e. remove the signal from the
		 * temporary signal mask only if it wasn't already
		 * in p_sigmask.
		 */
		if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
			SIGDELSET(newset, nfs_sig_set[i]);
	}
	mtx_unlock(&p->p_sigacts->ps_mtx);
	PROC_UNLOCK(p);
	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
}

static void
ncl_restore_sigmask(struct thread *td, sigset_t *set)
{
	if (td == NULL)
		td = curthread; /* XXX */
	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
}

/*
 * NFS wrapper to msleep() that installs a new p_sigmask and restores the
 * old one after msleep() returns.
 */
int
ncl_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
{
	sigset_t oldset;
	int error;
	struct proc *p;

	if ((priority & PCATCH) == 0)
		return msleep(ident, mtx, priority, wmesg, timo);
	if (td == NULL)
		td = curthread; /* XXX */
	ncl_set_sigmask(td, &oldset);
	error = msleep(ident, mtx, priority, wmesg, timo);
	ncl_restore_sigmask(td, &oldset);
	p = td->td_proc;
	return (error);
}

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

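	/*
	 * A failed RPC with nothing transferred is treated as a hard error:
	 * free every page except the one the caller requested and report
	 * VM_PAGER_ERROR.
	 */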
	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		vm_page_lock_queues();
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage)
				vm_page_free(pages[i]);
		}
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	vm_page_lock_queues();
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error occurred
			 * we may have hit a zero-fill section.  We simply
			 * leave valid set to 0.
			 */
			;
		}
		if (i != ap->a_reqpage) {
			/*
			 * Whether or not to leave the page activated is up in
			 * the air, but we should put the page on a page queue
			 * somewhere (it already is in the object).  Result:
			 * It appears that empirical results show that
			 * deactivating pages is best.
			 */

			/*
			 * Just in case someone was asking for this page we
			 * now tell them that it is ok to use.
			 */
			if (!error) {
				if (m->oflags & VPO_WANTED)
					vm_page_activate(m);
				else
					vm_page_deactivate(m);
				vm_page_wakeup(m);
			} else {
				vm_page_free(m);
			}
		}
	}
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on noncache-able vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_AGAIN;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSWRITE_UNSTABLE;
	else
	    iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

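	/*
	 * Only the pages that were actually written (count less the RPC's
	 * residual, rounded up to whole pages) are marked clean and reported
	 * as VM_PAGER_OK; the rest keep their VM_PAGER_AGAIN status.
	 */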
	if (!error) {
		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
		for (i = 0; i < nwritten; i++) {
			rtvals[i] = VM_PAGER_OK;
			vm_page_undirty(pages[i]);
		}
		if (must_commit) {
			ncl_clearcommit(vp->v_mount);
		}
	}
	return rtvals[0];
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ncl_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/ no readaheads. Just read data into the user buffer */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_mount->mnt_stat.f_iosize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
	    u_quad_t nsize;

	    mtx_lock(&np->n_mtx);
	    nsize = np->n_size;
	    mtx_unlock(&np->n_mtx);

	    switch (vp->v_type) {
	    case VREG:
		NFSINCRGLOBAL(newnfsstats.biocache_reads);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = newnfs_sigintr(nmp, td);
				if (error)
				    return (error);
				else
				    break;
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/* Note that bcount is *not* DEV_BSIZE aligned. */
		bcount = biosize;
		if ((off_t)lbn * biosize >= nsize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > nsize) {
			bcount = nsize - (off_t)lbn * biosize;
		}
		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = min((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = newnfs_sigintr(nmp, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuch!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = ncl_doio(vp, bp, cred, td);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state ( due to
		 * being VMIO ) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    };

	    if (n > 0) {
		    error = uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
822285612Sdelphij
823285612Sdelphij/*
824285612Sdelphij * The NFS write path cannot handle iovecs with len > 1. So we need to
825285612Sdelphij * break up iovecs accordingly (restricting them to wsize).
826285612Sdelphij * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
82754359Sroberto * For the ASYNC case, 2 copies are needed. The first a copy from the
82854359Sroberto * user buffer to a staging buffer and then a second copy from the staging
82954359Sroberto * buffer to mbufs. This can be optimized by copying from the user buffer
83054359Sroberto * directly into mbufs and passing the chain down, but that requires a
83154359Sroberto * fair amount of re-working of the relevant codepaths (and can be done
83254359Sroberto * later).
833182007Sroberto */
static int
nfs_directio_write(vp, uiop, cred, ioflag)
	struct vnode *vp;
	struct uio *uiop;
	struct ucred *cred;
	int ioflag;
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit);
			KASSERT((must_commit == 0),
				("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down. But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = min(uiop->uio_resid, wsize);
			size = min(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
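			/*
			 * Stash the staging uio in b_caller1; the directio
			 * write completion path is expected to retrieve and
			 * free it (along with the staging iovec and data)
			 * once the RPC finishes.
			 */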
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int n, on, error = 0;
	struct proc *p = td?td->td_proc:NULL;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ncl_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
		panic("ncl_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
105654359Sroberto	if (p != NULL) {
105754359Sroberto		PROC_LOCK(p);
1058285612Sdelphij		if (uio->uio_offset + uio->uio_resid >
105954359Sroberto		    lim_cur(p, RLIMIT_FSIZE)) {
106054359Sroberto			psignal(p, SIGXFSZ);
106154359Sroberto			PROC_UNLOCK(p);
106254359Sroberto			return (EFBIG);
106354359Sroberto		}
1064182007Sroberto		PROC_UNLOCK(p);
106554359Sroberto	}
1066182007Sroberto
106754359Sroberto	biosize = vp->v_mount->mnt_stat.f_iosize;
106854359Sroberto	/*
106954359Sroberto	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
107054359Sroberto	 * would exceed the local maximum per-file write commit size when
107154359Sroberto	 * combined with those, we must decide whether to flush,
107254359Sroberto	 * go synchronous, or return error.  We don't bother checking
107354359Sroberto	 * IO_UNIT -- we just make all writes atomic anyway, as there's
107454359Sroberto	 * no point optimizing for something that really won't ever happen.
107554359Sroberto	 */
107654359Sroberto	if (!(ioflag & IO_SYNC)) {
107754359Sroberto		int nflag;
107854359Sroberto
107954359Sroberto		mtx_lock(&np->n_mtx);
108054359Sroberto		nflag = np->n_flag;
108154359Sroberto		mtx_unlock(&np->n_mtx);
108254359Sroberto		int needrestart = 0;
108354359Sroberto		if (nmp->nm_wcommitsize < uio->uio_resid) {
108454359Sroberto			/*
108554359Sroberto			 * If this request could not possibly be completed
108654359Sroberto			 * without exceeding the maximum outstanding write
108754359Sroberto			 * commit size, see if we can convert it into a
1088285612Sdelphij			 * synchronous write operation.
1089285612Sdelphij			 */
1090285612Sdelphij			if (ioflag & IO_NDELAY)
1091285612Sdelphij				return (EAGAIN);
109254359Sroberto			ioflag |= IO_SYNC;
1093285612Sdelphij			if (nflag & NMODIFIED)
1094285612Sdelphij				needrestart = 1;
109554359Sroberto		} else if (nflag & NMODIFIED) {
1096285612Sdelphij			int wouldcommit = 0;
1097285612Sdelphij			BO_LOCK(&vp->v_bufobj);
109854359Sroberto			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
1099285612Sdelphij				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
1100285612Sdelphij				    b_bobufs) {
1101285612Sdelphij					if (bp->b_flags & B_NEEDCOMMIT)
1102285612Sdelphij						wouldcommit += bp->b_bcount;
1103285612Sdelphij				}
1104285612Sdelphij			}
1105285612Sdelphij			BO_UNLOCK(&vp->v_bufobj);
1106285612Sdelphij			/*
110754359Sroberto			 * Since we're not operating synchronously and
110854359Sroberto			 * bypassing the buffer cache, we are in a commit
110954359Sroberto			 * and holding all of these buffers whether
111054359Sroberto			 * transmitted or not.  If not limited, this
111154359Sroberto			 * will lead to the buffer cache deadlocking,
111254359Sroberto			 * as no one else can flush our uncommitted buffers.
111354359Sroberto			 */
111454359Sroberto			wouldcommit += uio->uio_resid;
111554359Sroberto			/*
111654359Sroberto			 * If we would initially exceed the maximum
111754359Sroberto			 * outstanding write commit size, flush and restart.
1118285612Sdelphij			 */
111954359Sroberto			if (wouldcommit > nmp->nm_wcommitsize)
112054359Sroberto				needrestart = 1;
112154359Sroberto		}
112254359Sroberto		if (needrestart)
112354359Sroberto			goto flush_and_restart;
112454359Sroberto	}
112554359Sroberto
112654359Sroberto	do {
112754359Sroberto		NFSINCRGLOBAL(newnfsstats.biocache_writes);
112854359Sroberto		lbn = uio->uio_offset / biosize;
112954359Sroberto		on = uio->uio_offset & (biosize-1);
113054359Sroberto		n = min((unsigned)(biosize - on), uio->uio_resid);
113154359Srobertoagain:
113254359Sroberto		/*
113354359Sroberto		 * Handle direct append and file extension cases, calculate
113454359Sroberto		 * unaligned buffer size.
113554359Sroberto		 */
113654359Sroberto		mtx_lock(&np->n_mtx);
113754359Sroberto		if (uio->uio_offset == np->n_size && n) {
113854359Sroberto			mtx_unlock(&np->n_mtx);
1139285612Sdelphij			/*
114054359Sroberto			 * Get the buffer (in its pre-append state to maintain
114154359Sroberto			 * B_CACHE if it was previously set).  Resize the
114254359Sroberto			 * nfsnode after we have locked the buffer to prevent
114354359Sroberto			 * readers from reading garbage.
114454359Sroberto			 */
1145285612Sdelphij			bcount = on;
114654359Sroberto			bp = nfs_getcacheblk(vp, lbn, bcount, td);
1147285612Sdelphij
114854359Sroberto			if (bp != NULL) {
114954359Sroberto				long save;
115054359Sroberto
115154359Sroberto				mtx_lock(&np->n_mtx);
115254359Sroberto				np->n_size = uio->uio_offset + n;
115354359Sroberto				np->n_flag |= NMODIFIED;
1154182007Sroberto				vnode_pager_setsize(vp, np->n_size);
115554359Sroberto				mtx_unlock(&np->n_mtx);
115654359Sroberto
1157182007Sroberto				save = bp->b_flags & B_CACHE;
1158330567Sgordon				bcount += n;
1159285612Sdelphij				allocbuf(bp, bcount);
1160285612Sdelphij				bp->b_flags |= save;
116154359Sroberto			}
1162182007Sroberto		} else {
116354359Sroberto			/*
116454359Sroberto			 * Obtain the locked cache block first, and then
116554359Sroberto			 * adjust the file's size as appropriate.
116654359Sroberto			 */
116754359Sroberto			bcount = on + n;
116854359Sroberto			if ((off_t)lbn * biosize + bcount < np->n_size) {
116954359Sroberto				if ((off_t)(lbn + 1) * biosize < np->n_size)
117054359Sroberto					bcount = biosize;
117154359Sroberto				else
117254359Sroberto					bcount = np->n_size - (off_t)lbn * biosize;
117354359Sroberto			}
117454359Sroberto			mtx_unlock(&np->n_mtx);
1175132451Sroberto			bp = nfs_getcacheblk(vp, lbn, bcount, td);
117654359Sroberto			mtx_lock(&np->n_mtx);
117754359Sroberto			if (uio->uio_offset + n > np->n_size) {
117854359Sroberto				np->n_size = uio->uio_offset + n;
117954359Sroberto				np->n_flag |= NMODIFIED;
118054359Sroberto				vnode_pager_setsize(vp, np->n_size);
1181285612Sdelphij			}
1182285612Sdelphij			mtx_unlock(&np->n_mtx);
118354359Sroberto		}
118454359Sroberto
118554359Sroberto		if (!bp) {
118654359Sroberto			error = newnfs_sigintr(nmp, td);
118754359Sroberto			if (!error)
118854359Sroberto				error = EINTR;
118954359Sroberto			break;
119054359Sroberto		}
1191285612Sdelphij
1192285612Sdelphij		/*
1193285612Sdelphij		 * Issue a READ if B_CACHE is not set.  In special-append
1194285612Sdelphij		 * mode, B_CACHE is based on the buffer prior to the write
1195285612Sdelphij		 * op and is typically set, avoiding the read.  If a read
1196285612Sdelphij		 * is required in special append mode, the server will
1197285612Sdelphij		 * probably send us a short-read since we extended the file
1198285612Sdelphij		 * on our end, resulting in b_resid == 0 and, thusly,
1199285612Sdelphij		 * B_CACHE getting set.
1200285612Sdelphij		 *
1201285612Sdelphij		 * We can also avoid issuing the read if the write covers
1202285612Sdelphij		 * the entire buffer.  We have to make sure the buffer state
1203285612Sdelphij		 * is reasonable in this case since we will not be initiating
1204285612Sdelphij		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
1205285612Sdelphij		 * more information.
1206285612Sdelphij		 *
1207285612Sdelphij		 * B_CACHE may also be set due to the buffer being cached
1208285612Sdelphij		 * normally.
1209285612Sdelphij		 */
1210285612Sdelphij
1211310419Sdelphij		if (on == 0 && n == bcount) {
1212285612Sdelphij			bp->b_flags |= B_CACHE;
1213285612Sdelphij			bp->b_flags &= ~B_INVAL;
1214285612Sdelphij			bp->b_ioflags &= ~BIO_ERROR;
1215285612Sdelphij		}
1216285612Sdelphij
1217285612Sdelphij		if ((bp->b_flags & B_CACHE) == 0) {
1218285612Sdelphij			bp->b_iocmd = BIO_READ;
1219285612Sdelphij			vfs_busy_pages(bp, 0);
1220285612Sdelphij			error = ncl_doio(vp, bp, cred, td);
1221285612Sdelphij			if (error) {
1222285612Sdelphij				brelse(bp);
1223285612Sdelphij				break;
1224285612Sdelphij			}
1225285612Sdelphij		}
1226285612Sdelphij		if (bp->b_wcred == NOCRED)
1227285612Sdelphij			bp->b_wcred = crhold(cred);
1228285612Sdelphij		mtx_lock(&np->n_mtx);
1229285612Sdelphij		np->n_flag |= NMODIFIED;
1230285612Sdelphij		mtx_unlock(&np->n_mtx);
1231285612Sdelphij
1232285612Sdelphij		/*
1233285612Sdelphij		 * If dirtyend exceeds file size, chop it down.  This should
1234285612Sdelphij		 * not normally occur but there is an append race where it
1235285612Sdelphij		 * might occur XXX, so we log it.
1236285612Sdelphij		 *
1237285612Sdelphij		 * If the chopping creates a reverse-indexed or degenerate
1238285612Sdelphij		 * situation with dirtyoff/end, we 0 both of them.
1239285612Sdelphij		 */
1240285612Sdelphij
1241285612Sdelphij		if (bp->b_dirtyend > bcount) {
1242285612Sdelphij			ncl_printf("NFS append race @%lx:%d\n",
1243285612Sdelphij			    (long)bp->b_blkno * DEV_BSIZE,
1244285612Sdelphij			    bp->b_dirtyend - bcount);
1245285612Sdelphij			bp->b_dirtyend = bcount;
1246285612Sdelphij		}
1247285612Sdelphij
1248285612Sdelphij		if (bp->b_dirtyoff >= bp->b_dirtyend)
1249285612Sdelphij			bp->b_dirtyoff = bp->b_dirtyend = 0;
1250330567Sgordon
1251330567Sgordon		/*
1252330567Sgordon		 * If the new write will leave a contiguous dirty
1253330567Sgordon		 * area, just update the b_dirtyoff and b_dirtyend,
1254285612Sdelphij		 * otherwise force a write rpc of the old dirty area.
1255285612Sdelphij		 *
1256285612Sdelphij		 * While it is possible to merge discontiguous writes due to
1257285612Sdelphij		 * our having a B_CACHE buffer ( and thus valid read data
1258330567Sgordon		 * for the hole), we don't because it could lead to
1259330567Sgordon		 * significant cache coherency problems with multiple clients,
1260330567Sgordon		 * especially if locking is implemented later on.
1261330567Sgordon		 *
1262330567Sgordon		 * as an optimization we could theoretically maintain
1263330567Sgordon		 * a linked list of discontinuous areas, but we would still
1264330567Sgordon		 * have to commit them separately so there isn't much
1265330567Sgordon		 * advantage to it except perhaps a bit of asynchronization.
1266330567Sgordon		 */
1267285612Sdelphij
1268285612Sdelphij		if (bp->b_dirtyend > 0 &&
1269285612Sdelphij		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1270285612Sdelphij			if (bwrite(bp) == EINTR) {
1271285612Sdelphij				error = EINTR;
1272285612Sdelphij				break;
1273285612Sdelphij			}
1274285612Sdelphij			goto again;
1275285612Sdelphij		}
1276285612Sdelphij
1277285612Sdelphij		error = uiomove((char *)bp->b_data + on, n, uio);
1278285612Sdelphij
1279285612Sdelphij		/*
1280285612Sdelphij		 * Since this block is being modified, it must be written
1281285612Sdelphij		 * again and not just committed.  Since write clustering does
1282285612Sdelphij		 * not work for the stage 1 data write, only the stage 2
1283285612Sdelphij		 * commit rpc, we have to clear B_CLUSTEROK as well.
1284285612Sdelphij		 */
1285285612Sdelphij		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1286285612Sdelphij
1287285612Sdelphij		if (error) {
1288285612Sdelphij			bp->b_ioflags |= BIO_ERROR;
1289285612Sdelphij			brelse(bp);
1290310419Sdelphij			break;
1291285612Sdelphij		}
1292285612Sdelphij
1293285612Sdelphij		/*
1294285612Sdelphij		 * Only update dirtyoff/dirtyend if not a degenerate
1295285612Sdelphij		 * condition.
1296285612Sdelphij		 */
1297285612Sdelphij		if (n) {
1298285612Sdelphij			if (bp->b_dirtyend > 0) {
1299285612Sdelphij				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1300285612Sdelphij				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1301285612Sdelphij			} else {
1302285612Sdelphij				bp->b_dirtyoff = on;
1303285612Sdelphij				bp->b_dirtyend = on + n;
1304285612Sdelphij			}
1305310419Sdelphij			vfs_bio_set_valid(bp, on, n);
1306285612Sdelphij		}
1307285612Sdelphij
1308285612Sdelphij		/*
1309285612Sdelphij		 * If IO_SYNC do bwrite().
1310285612Sdelphij		 *
1311285612Sdelphij		 * IO_INVAL appears to be unused.  The idea appears to be
1312310419Sdelphij		 * to turn off caching in this case.  Very odd.  XXX
1313285612Sdelphij		 */
1314285612Sdelphij		if ((ioflag & IO_SYNC)) {
1315285612Sdelphij			if (ioflag & IO_INVAL)
1316285612Sdelphij				bp->b_flags |= B_NOCACHE;
1317285612Sdelphij			error = bwrite(bp);
1318285612Sdelphij			if (error)
1319285612Sdelphij				break;
1320310419Sdelphij		} else if ((n + on) == biosize) {
1321310419Sdelphij			bp->b_flags |= B_ASYNC;
1322310419Sdelphij			(void) ncl_writebp(bp, 0, NULL);
1323310419Sdelphij		} else {
1324310419Sdelphij			bdwrite(bp);
1325310419Sdelphij		}
1326310419Sdelphij	} while (uio->uio_resid > 0 && n > 0);
1327310419Sdelphij
1328310419Sdelphij	return (error);
1329310419Sdelphij}
1330310419Sdelphij
1331310419Sdelphij/*
1332310419Sdelphij * Get an nfs cache block.
1333310419Sdelphij *
1334310419Sdelphij * Allocate a new one if the block isn't currently in the cache
1335310419Sdelphij * and return the block marked busy. If the calling process is
1336310419Sdelphij * interrupted by a signal for an interruptible mount point, return
1337310419Sdelphij * NULL.
1338310419Sdelphij *
1339310419Sdelphij * The caller must carefully deal with the possible B_INVAL state of
1340285612Sdelphij * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1341285612Sdelphij * indirectly), so synchronous reads can be issued without worrying about
1342285612Sdelphij * the B_INVAL state.  We have to be a little more careful when dealing
1343285612Sdelphij * with writes (see comments in nfs_write()) when extending a file past
1344310419Sdelphij * its EOF.
1345310419Sdelphij */
1346310419Sdelphijstatic struct buf *
1347285612Sdelphijnfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1348310419Sdelphij{
1349310419Sdelphij	struct buf *bp;
1350310419Sdelphij	struct mount *mp;
1351310419Sdelphij	struct nfsmount *nmp;
1352310419Sdelphij
1353310419Sdelphij	mp = vp->v_mount;
1354310419Sdelphij	nmp = VFSTONFS(mp);
1355310419Sdelphij
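	/*
	 * For an interruptible mount, let the getblk() sleep be broken by
	 * a signal; if getblk() returns NULL, check for a pending signal
	 * and retry with a timeout.
	 */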
1356310419Sdelphij	if (nmp->nm_flag & NFSMNT_INT) {
1357310419Sdelphij 		sigset_t oldset;
1358310419Sdelphij
1359310419Sdelphij 		ncl_set_sigmask(td, &oldset);
1360310419Sdelphij		bp = getblk(vp, bn, size, PCATCH, 0, 0);
1361310419Sdelphij 		ncl_restore_sigmask(td, &oldset);
1362310419Sdelphij		while (bp == NULL) {
1363310419Sdelphij			if (newnfs_sigintr(nmp, td))
1364310419Sdelphij				return (NULL);
1365310419Sdelphij			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1366310419Sdelphij		}
1367310419Sdelphij	} else {
1368285612Sdelphij		bp = getblk(vp, bn, size, 0, 0, 0);
1369285612Sdelphij	}
1370310419Sdelphij
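	/*
	 * For regular files, express b_blkno in DEV_BSIZE units so that
	 * the logical block maps to the correct byte offset of the file.
	 */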
1371310419Sdelphij	if (vp->v_type == VREG) {
1372285612Sdelphij		int biosize;
1373285612Sdelphij
1374285612Sdelphij		biosize = mp->mnt_stat.f_iosize;
1375310419Sdelphij		bp->b_blkno = bn * (biosize / DEV_BSIZE);
1376285612Sdelphij	}
1377285612Sdelphij	return (bp);
1378285612Sdelphij}
1379285612Sdelphij
138054359Sroberto/*
1381 * Flush and invalidate all dirty buffers. If another process is already
1382 * doing the flush, just wait for completion.
1383 */
1384int
1385ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1386{
1387	struct nfsnode *np = VTONFS(vp);
1388	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1389	int error = 0, slpflag, slptimeo;
1390 	int old_lock = 0;
1391
1392	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1393
1394	if ((nmp->nm_flag & NFSMNT_INT) == 0)
1395		intrflg = 0;
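	/*
	 * A forced unmount in progress makes the flush interruptible,
	 * regardless of the mount flags.
	 */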
1396	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1397		intrflg = 1;
1398	if (intrflg) {
1399		slpflag = PCATCH;
1400		slptimeo = 2 * hz;
1401	} else {
1402		slpflag = 0;
1403		slptimeo = 0;
1404	}
1405
1406	old_lock = ncl_upgrade_vnlock(vp);
1407	if (vp->v_iflag & VI_DOOMED) {
1408		/*
1409		 * Since vgonel() uses the generic vinvalbuf() to flush
1410		 * dirty buffers and it does not call this function, it
1411		 * is safe to just return OK when VI_DOOMED is set.
1412		 */
1413		ncl_downgrade_vnlock(vp, old_lock);
1414		return (0);
1415	}
1416
1417	/*
1418	 * Now, flush as required.
1419	 */
1420	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1421		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1422		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1423		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1424		/*
1425		 * If the page clean was interrupted, fail the invalidation.
1426		 * Not doing so, we run the risk of losing dirty pages in the
1427		 * vinvalbuf() call below.
1428		 */
1429		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1430			goto out;
1431	}
1432
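	/*
	 * Invalidate the buffers, retrying until the call succeeds or,
	 * on an interruptible mount, a signal is pending.
	 */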
1433	error = vinvalbuf(vp, flags, slpflag, 0);
1434	while (error) {
1435		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1436			goto out;
1437		error = vinvalbuf(vp, flags, 0, slptimeo);
1438	}
1439	mtx_lock(&np->n_mtx);
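	/*
	 * Clear NMODIFIED only when no asynchronous direct writes remain
	 * outstanding on the node.
	 */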
1440	if (np->n_directio_asyncwr == 0)
1441		np->n_flag &= ~NMODIFIED;
1442	mtx_unlock(&np->n_mtx);
1443out:
1444	ncl_downgrade_vnlock(vp, old_lock);
1445	return (error);
1446}
1447
1448/*
1449 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1450 * This is mainly to avoid queueing async I/O requests when the nfsiods
1451 * are all hung on a dead server.
1452 *
1453 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1454 * is eventually dequeued by the async daemon, ncl_doio() *will*.
1455 */
1456int
1457ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1458{
1459	int iod;
1460	int gotiod;
1461	int slpflag = 0;
1462	int slptimeo = 0;
1463	int error, error2;
1464
1465	/*
1466	 * Unless nm_readahead is set > 0, don't bother with async I/O
1467	 * threads. For LAN environments, they don't buy any significant
1468	 * performance improvement that you can't get with large block
1469	 * sizes.
1470	 */
1471	if (nmp->nm_readahead == 0)
1472		return (EPERM);
1473
1474	/*
1475	 * Commits are usually short and sweet, so let's save some CPU and
1476	 * leave the async daemons for more important rpc's (such as reads
1477	 * and writes).
1478	 */
1479	mtx_lock(&ncl_iod_mutex);
1480	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1481	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
1482		mtx_unlock(&ncl_iod_mutex);
1483		return(EIO);
1484	}
1485again:
1486	if (nmp->nm_flag & NFSMNT_INT)
1487		slpflag = PCATCH;
1488	gotiod = FALSE;
1489
1490	/*
1491	 * Find a free iod to process this request.
1492	 */
1493	for (iod = 0; iod < ncl_numasync; iod++)
1494		if (ncl_iodwant[iod]) {
1495			gotiod = TRUE;
1496			break;
1497		}
1498
1499	/*
1500	 * Try to create one if none are free.
1501	 */
1502	if (!gotiod) {
1503		iod = ncl_nfsiodnew();
1504		if (iod != -1)
1505			gotiod = TRUE;
1506	}
1507
1508	if (gotiod) {
1509		/*
1510		 * Found one, so wake it up and tell it which
1511		 * mount to process.
1512		 */
1513		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1514		    iod, nmp));
1515		ncl_iodwant[iod] = NULL;
1516		ncl_iodmount[iod] = nmp;
1517		nmp->nm_bufqiods++;
1518		wakeup(&ncl_iodwant[iod]);
1519	}
1520
1521	/*
1522	 * If none are free, we may already have an iod working on this mount
1523	 * point.  If so, it will process our request.
1524	 */
1525	if (!gotiod) {
1526		if (nmp->nm_bufqiods > 0) {
1527			NFS_DPF(ASYNCIO,
1528				("ncl_asyncio: %d iods are already processing mount %p\n",
1529				 nmp->nm_bufqiods, nmp));
1530			gotiod = TRUE;
1531		}
1532	}
1533
1534	/*
1535	 * If we have an iod which can process the request, then queue
1536	 * the buffer.
1537	 */
1538	if (gotiod) {
1539		/*
1540		 * Ensure that the queue never grows too large.  We still want
1541		 * to asynchronize, so we block rather than return EIO.
1542		 */
1543		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1544			NFS_DPF(ASYNCIO,
1545				("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1546			nmp->nm_bufqwant = TRUE;
1547 			error = ncl_msleep(td, &nmp->nm_bufq, &ncl_iod_mutex,
1548					   slpflag | PRIBIO,
1549 					   "nfsaio", slptimeo);
1550			if (error) {
1551				error2 = newnfs_sigintr(nmp, td);
1552				if (error2) {
1553					mtx_unlock(&ncl_iod_mutex);
1554					return (error2);
1555				}
1556				if (slpflag == PCATCH) {
1557					slpflag = 0;
1558					slptimeo = 2 * hz;
1559				}
1560			}
1561			/*
1562			 * We might have lost our iod while sleeping,
1563			 * so check and loop if necessary.
1564			 */
1565			if (nmp->nm_bufqiods == 0) {
1566				NFS_DPF(ASYNCIO,
1567					("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1568				goto again;
1569			}
1570		}
1571
1572		/* We might have lost our nfsiod */
1573		if (nmp->nm_bufqiods == 0) {
1574			NFS_DPF(ASYNCIO,
1575				("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1576			goto again;
1577		}
1578
1579		if (bp->b_iocmd == BIO_READ) {
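		/*
		 * Make sure the buffer carries a credential for the I/O
		 * that the nfsiod will perform on our behalf.
		 */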
1580			if (bp->b_rcred == NOCRED && cred != NOCRED)
1581				bp->b_rcred = crhold(cred);
1582		} else {
1583			if (bp->b_wcred == NOCRED && cred != NOCRED)
1584				bp->b_wcred = crhold(cred);
1585		}
1586
1587		if (bp->b_flags & B_REMFREE)
1588			bremfreef(bp);
1589		BUF_KERNPROC(bp);
1590		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1591		nmp->nm_bufqlen++;
1592		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1593			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1594			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1595			VTONFS(bp->b_vp)->n_directio_asyncwr++;
1596			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1597		}
1598		mtx_unlock(&ncl_iod_mutex);
1599		return (0);
1600	}
1601
1602	mtx_unlock(&ncl_iod_mutex);
1603
1604	/*
1605	 * All the iods are busy on other mounts, so return EIO to
1606	 * force the caller to process the i/o synchronously.
1607	 */
1608	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1609	return (EIO);
1610}
1611
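/*
 * Complete an asynchronous direct write that was handed off to an nfsiod:
 * do the write RPC with FILESYNC semantics, free the malloc'd uio, and
 * update the nfsnode's count of outstanding direct writes.
 */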
1612void
1613ncl_doio_directwrite(struct buf *bp)
1614{
1615	int iomode, must_commit;
1616	struct uio *uiop = (struct uio *)bp->b_caller1;
1617	char *iov_base = uiop->uio_iov->iov_base;
1618
1619	iomode = NFSWRITE_FILESYNC;
1620	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1621	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
1622	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1623	free(iov_base, M_NFSDIRECTIO);
1624	free(uiop->uio_iov, M_NFSDIRECTIO);
1625	free(uiop, M_NFSDIRECTIO);
1626	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1627		struct nfsnode *np = VTONFS(bp->b_vp);
1628		mtx_lock(&np->n_mtx);
1629		np->n_directio_asyncwr--;
1630		if (np->n_directio_asyncwr == 0) {
1631			np->n_flag &= ~NMODIFIED;
1632			if ((np->n_flag & NFSYNCWAIT)) {
1633				np->n_flag &= ~NFSYNCWAIT;
1634				wakeup((caddr_t)&np->n_directio_asyncwr);
1635			}
1636		}
1637		mtx_unlock(&np->n_mtx);
1638	}
1639	bp->b_vp = NULL;
1640	relpbuf(bp, &ncl_pbuf_freecnt);
1641}
1642
1643/*
1644 * Do an I/O operation to/from a cache block. This may be called
1645 * synchronously or from an nfsiod.
1646 */
1647int
1648ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
1649{
1650	struct uio *uiop;
1651	struct nfsnode *np;
1652	struct nfsmount *nmp;
1653	int error = 0, iomode, must_commit = 0;
1654	struct uio uio;
1655	struct iovec io;
1656	struct proc *p = td ? td->td_proc : NULL;
1657	uint8_t	iocmd;
1658
1659	np = VTONFS(vp);
1660	nmp = VFSTONFS(vp->v_mount);
1661	uiop = &uio;
1662	uiop->uio_iov = &io;
1663	uiop->uio_iovcnt = 1;
1664	uiop->uio_segflg = UIO_SYSSPACE;
1665	uiop->uio_td = td;
1666
1667	/*
1668	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
1669	 * do this here so we do not have to do it in all the code that
1670	 * calls us.
1671	 */
1672	bp->b_flags &= ~B_INVAL;
1673	bp->b_ioflags &= ~BIO_ERROR;
1674
1675	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1676	iocmd = bp->b_iocmd;
1677	if (iocmd == BIO_READ) {
1678	    io.iov_len = uiop->uio_resid = bp->b_bcount;
1679	    io.iov_base = bp->b_data;
1680	    uiop->uio_rw = UIO_READ;
1681
1682	    switch (vp->v_type) {
1683	    case VREG:
1684		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1685		NFSINCRGLOBAL(newnfsstats.read_bios);
1686		error = ncl_readrpc(vp, uiop, cr);
1687
1688		if (!error) {
1689		    if (uiop->uio_resid) {
1690			/*
1691			 * If we had a short read with no error, we must have
1692			 * hit a file hole.  We should zero-fill the remainder.
1693			 * This can also occur if the server hits the file EOF.
1694			 *
1695			 * Holes used to be able to occur due to pending
1696			 * writes, but that is not possible any longer.
1697			 */
1698			int nread = bp->b_bcount - uiop->uio_resid;
1699			int left  = uiop->uio_resid;
1700
1701			if (left > 0)
1702				bzero((char *)bp->b_data + nread, left);
1703			uiop->uio_resid = 0;
1704		    }
1705		}
1706		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
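		/*
		 * If this vnode backs a running executable and its
		 * modification time has changed on the server, kill the
		 * process, since its text pages are no longer valid.
		 */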
1707		if (p && (vp->v_vflag & VV_TEXT)) {
1708			mtx_lock(&np->n_mtx);
1709			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1710				mtx_unlock(&np->n_mtx);
1711				PROC_LOCK(p);
1712				killproc(p, "text file modification");
1713				PROC_UNLOCK(p);
1714			} else
1715				mtx_unlock(&np->n_mtx);
1716		}
1717		break;
1718	    case VLNK:
1719		uiop->uio_offset = (off_t)0;
1720		NFSINCRGLOBAL(newnfsstats.readlink_bios);
1721		error = ncl_readlinkrpc(vp, uiop, cr);
1722		break;
1723	    case VDIR:
1724		NFSINCRGLOBAL(newnfsstats.readdir_bios);
1725		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1726		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1727			error = ncl_readdirplusrpc(vp, uiop, cr, td);
1728			if (error == NFSERR_NOTSUPP)
1729				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1730		}
1731		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1732			error = ncl_readdirrpc(vp, uiop, cr, td);
1733		/*
1734		 * end-of-directory sets B_INVAL but does not generate an
1735		 * error.
1736		 */
1737		if (error == 0 && uiop->uio_resid == bp->b_bcount)
1738			bp->b_flags |= B_INVAL;
1739		break;
1740	    default:
1741		ncl_printf("ncl_doio:  type %x unexpected\n", vp->v_type);
1742		break;
1743	    }
1744	    if (error) {
1745		bp->b_ioflags |= BIO_ERROR;
1746		bp->b_error = error;
1747	    }
1748	} else {
1749	    /*
1750	     * If we only need to commit, try to commit
1751	     */
1752	    if (bp->b_flags & B_NEEDCOMMIT) {
1753		    int retv;
1754		    off_t off;
1755
1756		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1757		    retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1758			bp->b_wcred, td);
1759		    if (retv == 0) {
1760			    bp->b_dirtyoff = bp->b_dirtyend = 0;
1761			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1762			    bp->b_resid = 0;
1763			    bufdone(bp);
1764			    return (0);
1765		    }
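		    /*
		     * A stale write verifier means the server has rebooted
		     * since the data was written unstable, so those writes
		     * must be redone; clear the commit state for the mount.
		     */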
1766		    if (retv == NFSERR_STALEWRITEVERF) {
1767			    ncl_clearcommit(vp->v_mount);
1768		    }
1769	    }
1770
1771	    /*
1772	     * Setup for actual write
1773	     */
1774	    mtx_lock(&np->n_mtx);
1775	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1776		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1777	    mtx_unlock(&np->n_mtx);
1778
1779	    if (bp->b_dirtyend > bp->b_dirtyoff) {
1780		io.iov_len = uiop->uio_resid = bp->b_dirtyend
1781		    - bp->b_dirtyoff;
1782		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1783		    + bp->b_dirtyoff;
1784		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1785		uiop->uio_rw = UIO_WRITE;
1786		NFSINCRGLOBAL(newnfsstats.write_bios);
1787
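		/*
		 * A plain asynchronous buffer (not marked for commit, not
		 * B_NOCACHE and not part of a cluster) may be written
		 * unstable and committed later; everything else is written
		 * FILESYNC.
		 */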
1788		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1789		    iomode = NFSWRITE_UNSTABLE;
1790		else
1791		    iomode = NFSWRITE_FILESYNC;
1792
1793		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit);
1794
1795		/*
1796		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1797		 * to cluster the buffers needing commit.  This will allow
1798		 * the system to submit a single commit rpc for the whole
1799		 * cluster.  We can do this even if the buffer is not 100%
1800		 * dirty (relative to the NFS blocksize), so we optimize the
1801		 * append-to-file-case.
1802		 *
1803		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1804		 * cleared because write clustering only works for commit
1805		 * rpc's, not for the data portion of the write).
1806		 */
1807
1808		if (!error && iomode == NFSWRITE_UNSTABLE) {
1809		    bp->b_flags |= B_NEEDCOMMIT;
1810		    if (bp->b_dirtyoff == 0
1811			&& bp->b_dirtyend == bp->b_bcount)
1812			bp->b_flags |= B_CLUSTEROK;
1813		} else {
1814		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1815		}
1816
1817		/*
1818		 * For an interrupted write, the buffer is still valid
1819		 * and the write hasn't been pushed to the server yet,
1820		 * so we can't set BIO_ERROR and report the interruption
1821		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1822		 * is not relevant, so the rpc attempt is essentially
1823		 * a noop.  For the case of a V3 write rpc not being
1824		 * committed to stable storage, the block is still
1825		 * dirty and requires either a commit rpc or another
1826		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1827		 * the block is reused. This is indicated by setting
1828		 * the B_DELWRI and B_NEEDCOMMIT flags.
1829		 *
1830		 * If the buffer is marked B_PAGING, it does not reside on
1831		 * the vp's paging queues so we cannot call bdirty().  The
1832		 * bp in this case is not an NFS cache block so we should
1833		 * be safe. XXX
1834		 *
1835		 * The logic below breaks up errors into recoverable and
1836		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1837		 * and keep the buffer around for potential write retries.
1838		 * For the latter (e.g. ESTALE), we toss the buffer away (B_INVAL)
1839		 * and save the error in the nfsnode. This is less than ideal
1840		 * but necessary. Keeping such buffers around could potentially
1841		 * cause buffer exhaustion eventually (they can never be written
1842		 * out, so they will constantly be re-dirtied). It also causes
1843		 * all sorts of vfs panics. For non-recoverable write errors,
1844		 * we also invalidate the attrcache, so we'll be forced to go over
1845		 * the wire for this object, returning an error to the user on the
1846		 * next call (most of the time).
1847		 */
1848    		if (error == EINTR || error == EIO || error == ETIMEDOUT
1849		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1850			int s;
1851
1852			s = splbio();
1853			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1854			if ((bp->b_flags & B_PAGING) == 0) {
1855			    bdirty(bp);
1856			    bp->b_flags &= ~B_DONE;
1857			}
1858			if (error && (bp->b_flags & B_ASYNC) == 0)
1859			    bp->b_flags |= B_EINTR;
1860			splx(s);
1861	    	} else {
1862		    if (error) {
1863			bp->b_ioflags |= BIO_ERROR;
1864			bp->b_flags |= B_INVAL;
1865			bp->b_error = np->n_error = error;
1866			mtx_lock(&np->n_mtx);
1867			np->n_flag |= NWRITEERR;
1868			np->n_attrstamp = 0;
1869			mtx_unlock(&np->n_mtx);
1870		    }
1871		    bp->b_dirtyoff = bp->b_dirtyend = 0;
1872		}
1873	    } else {
1874		bp->b_resid = 0;
1875		bufdone(bp);
1876		return (0);
1877	    }
1878	}
1879	bp->b_resid = uiop->uio_resid;
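	/*
	 * must_commit is set when the write RPC saw a changed write
	 * verifier; in that case the commit state for the whole mount is
	 * cleared so affected buffers will be written again.
	 */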
1880	if (must_commit)
1881	    ncl_clearcommit(vp->v_mount);
1882	bufdone(bp);
1883	return (error);
1884}
1885
1886/*
1887 * Used to aid in handling ftruncate() operations on the NFS client side.
1888 * Truncation creates a number of special problems for NFS.  We have to
1889 * throw away VM pages and buffer cache buffers that are beyond EOF, and
1890 * we have to properly handle VM pages or (potentially dirty) buffers
1891 * that straddle the truncation point.
1892 */
1893
1894int
1895ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1896{
1897	struct nfsnode *np = VTONFS(vp);
1898	u_quad_t tsize;
1899	int biosize = vp->v_mount->mnt_stat.f_iosize;
1900	int error = 0;
1901
1902	mtx_lock(&np->n_mtx);
1903	tsize = np->n_size;
1904	np->n_size = nsize;
1905	mtx_unlock(&np->n_mtx);
1906
1907	if (nsize < tsize) {
1908		struct buf *bp;
1909		daddr_t lbn;
1910		int bufsize;
1911
1912		/*
1913		 * vtruncbuf() doesn't get the buffer overlapping the
1914		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
1915		 * buffer that now needs to be truncated.
1916		 */
1917		error = vtruncbuf(vp, cred, td, nsize, biosize);
1918		lbn = nsize / biosize;
1919		bufsize = nsize & (biosize - 1);
1920		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1921 		if (!bp)
1922 			return EINTR;
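		/*
		 * Clamp the dirty region so it does not extend past the new
		 * end of the buffer.
		 */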
1923		if (bp->b_dirtyoff > bp->b_bcount)
1924			bp->b_dirtyoff = bp->b_bcount;
1925		if (bp->b_dirtyend > bp->b_bcount)
1926			bp->b_dirtyend = bp->b_bcount;
1927		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
1928		brelse(bp);
1929	} else {
1930		vnode_pager_setsize(vp, nsize);
1931	}
1932	return(error);
1933}
1934
1935