nfs_clbio.c revision 194425
1191783Srmacklem/*-
2191783Srmacklem * Copyright (c) 1989, 1993
3191783Srmacklem *	The Regents of the University of California.  All rights reserved.
4191783Srmacklem *
5191783Srmacklem * This code is derived from software contributed to Berkeley by
6191783Srmacklem * Rick Macklem at The University of Guelph.
7191783Srmacklem *
8191783Srmacklem * Redistribution and use in source and binary forms, with or without
9191783Srmacklem * modification, are permitted provided that the following conditions
10191783Srmacklem * are met:
11191783Srmacklem * 1. Redistributions of source code must retain the above copyright
12191783Srmacklem *    notice, this list of conditions and the following disclaimer.
13191783Srmacklem * 2. Redistributions in binary form must reproduce the above copyright
14191783Srmacklem *    notice, this list of conditions and the following disclaimer in the
15191783Srmacklem *    documentation and/or other materials provided with the distribution.
16191783Srmacklem * 4. Neither the name of the University nor the names of its contributors
17191783Srmacklem *    may be used to endorse or promote products derived from this software
18191783Srmacklem *    without specific prior written permission.
19191783Srmacklem *
20191783Srmacklem * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
21191783Srmacklem * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22191783Srmacklem * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23191783Srmacklem * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
24191783Srmacklem * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25191783Srmacklem * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26191783Srmacklem * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27191783Srmacklem * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28191783Srmacklem * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29191783Srmacklem * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30191783Srmacklem * SUCH DAMAGE.
31191783Srmacklem *
32191783Srmacklem *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
33191783Srmacklem */
34191783Srmacklem
35191783Srmacklem#include <sys/cdefs.h>
36191783Srmacklem__FBSDID("$FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 194425 2009-06-18 05:56:24Z alc $");
37191783Srmacklem
38191783Srmacklem#include <sys/param.h>
39191783Srmacklem#include <sys/systm.h>
40191783Srmacklem#include <sys/bio.h>
41191783Srmacklem#include <sys/buf.h>
42191783Srmacklem#include <sys/kernel.h>
43191783Srmacklem#include <sys/mount.h>
44191783Srmacklem#include <sys/proc.h>
45191783Srmacklem#include <sys/resourcevar.h>
46191783Srmacklem#include <sys/signalvar.h>
47191783Srmacklem#include <sys/vmmeter.h>
48191783Srmacklem#include <sys/vnode.h>
49191783Srmacklem
50191783Srmacklem#include <vm/vm.h>
51191783Srmacklem#include <vm/vm_extern.h>
52191783Srmacklem#include <vm/vm_page.h>
53191783Srmacklem#include <vm/vm_object.h>
54191783Srmacklem#include <vm/vm_pager.h>
55191783Srmacklem#include <vm/vnode_pager.h>
56191783Srmacklem
57191783Srmacklem#include <fs/nfs/nfsport.h>
58191783Srmacklem#include <fs/nfsclient/nfsmount.h>
59191783Srmacklem#include <fs/nfsclient/nfs.h>
60191783Srmacklem#include <fs/nfsclient/nfsnode.h>
61191783Srmacklem
62191783Srmacklemextern int newnfs_directio_allow_mmap;
63191783Srmacklemextern struct nfsstats newnfsstats;
64191783Srmacklemextern struct mtx ncl_iod_mutex;
65191783Srmacklemextern int ncl_numasync;
66191783Srmacklemextern struct proc *ncl_iodwant[NFS_MAXRAHEAD];
67191783Srmacklemextern struct nfsmount *ncl_iodmount[NFS_MAXRAHEAD];
68191783Srmacklemextern int newnfs_directio_enable;
69191783Srmacklem
70191783Srmacklemint ncl_pbuf_freecnt = -1;	/* start out unlimited */
71191783Srmacklem
72191783Srmacklemstatic struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
73191783Srmacklem    struct thread *td);
74191783Srmacklemstatic int nfs_directio_write(struct vnode *vp, struct uio *uiop,
75191783Srmacklem    struct ucred *cred, int ioflag);
76191783Srmacklem
77191783Srmacklem/*
78191783Srmacklem * Any signal that can interrupt an NFS operation in an intr mount
79191783Srmacklem * should be added to this set. SIGSTOP and SIGKILL cannot be masked.
80191783Srmacklem */
81191783Srmacklemstatic int nfs_sig_set[] = {
82191783Srmacklem	SIGINT,
83191783Srmacklem	SIGTERM,
84191783Srmacklem	SIGHUP,
85191783Srmacklem	SIGKILL,
86191783Srmacklem	SIGSTOP,
87191783Srmacklem	SIGQUIT
88191783Srmacklem};
89191783Srmacklem
90191783Srmacklem#ifdef notnow
91191783Srmacklem/*
92191783Srmacklem * Check to see if one of the signals in our subset is pending on
93191783Srmacklem * the process (in an intr mount).
94191783Srmacklem */
95191783Srmacklemint
96191783Srmacklemncl_sig_pending(sigset_t set)
97191783Srmacklem{
98191783Srmacklem	int i;
99191783Srmacklem
100191783Srmacklem	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++)
101191783Srmacklem		if (SIGISMEMBER(set, nfs_sig_set[i]))
102191783Srmacklem			return (1);
103191783Srmacklem	return (0);
104191783Srmacklem}
105191783Srmacklem#endif
106191783Srmacklem
107191783Srmacklem/*
108191783Srmacklem * The set/restore sigmask functions are used to (temporarily) overwrite
109191783Srmacklem * the process p_sigmask during an RPC call (for example). These are also
110191783Srmacklem * used in other places in the NFS client that might tsleep().
111191783Srmacklem */
112191783Srmacklemstatic void
113191783Srmacklemncl_set_sigmask(struct thread *td, sigset_t *oldset)
114191783Srmacklem{
115191783Srmacklem	sigset_t newset;
116191783Srmacklem	int i;
117191783Srmacklem	struct proc *p;
118191783Srmacklem
119191783Srmacklem	SIGFILLSET(newset);
120191783Srmacklem	if (td == NULL)
121191783Srmacklem		td = curthread; /* XXX */
122191783Srmacklem	p = td->td_proc;
123191783Srmacklem	/* Remove the NFS set of signals from newset */
124191783Srmacklem	PROC_LOCK(p);
125191783Srmacklem	mtx_lock(&p->p_sigacts->ps_mtx);
126191783Srmacklem	for (i = 0 ; i < sizeof(nfs_sig_set)/sizeof(int) ; i++) {
127191783Srmacklem		/*
128191783Srmacklem		 * But make sure we leave the ones already masked
129191783Srmacklem		 * by the process, i.e. remove the signal from the
130191783Srmacklem		 * temporary signal mask only if it wasn't already
131191783Srmacklem		 * in p_sigmask.
132191783Srmacklem		 */
133191783Srmacklem		if (!SIGISMEMBER(td->td_sigmask, nfs_sig_set[i]) &&
134191783Srmacklem		    !SIGISMEMBER(p->p_sigacts->ps_sigignore, nfs_sig_set[i]))
135191783Srmacklem			SIGDELSET(newset, nfs_sig_set[i]);
136191783Srmacklem	}
137191783Srmacklem	mtx_unlock(&p->p_sigacts->ps_mtx);
138191783Srmacklem	PROC_UNLOCK(p);
139191783Srmacklem	kern_sigprocmask(td, SIG_SETMASK, &newset, oldset, 0);
140191783Srmacklem}
141191783Srmacklem
142191783Srmacklemstatic void
143191783Srmacklemncl_restore_sigmask(struct thread *td, sigset_t *set)
144191783Srmacklem{
145191783Srmacklem	if (td == NULL)
146191783Srmacklem		td = curthread; /* XXX */
147191783Srmacklem	kern_sigprocmask(td, SIG_SETMASK, set, NULL, 0);
148191783Srmacklem}
149191783Srmacklem
150191783Srmacklem/*
151191783Srmacklem * NFS wrapper for msleep() that installs a new p_sigmask and restores the
152191783Srmacklem * old one after msleep() returns.
153191783Srmacklem */
154191783Srmacklemint
155191783Srmacklemncl_msleep(struct thread *td, void *ident, struct mtx *mtx, int priority, char *wmesg, int timo)
156191783Srmacklem{
157191783Srmacklem	sigset_t oldset;
158191783Srmacklem	int error;
160191783Srmacklem
161191783Srmacklem	if ((priority & PCATCH) == 0)
162191783Srmacklem		return msleep(ident, mtx, priority, wmesg, timo);
163191783Srmacklem	if (td == NULL)
164191783Srmacklem		td = curthread; /* XXX */
165191783Srmacklem	ncl_set_sigmask(td, &oldset);
166191783Srmacklem	error = msleep(ident, mtx, priority, wmesg, timo);
167191783Srmacklem	ncl_restore_sigmask(td, &oldset);
169191783Srmacklem	return (error);
170191783Srmacklem}
171191783Srmacklem
172191783Srmacklem/*
173191783Srmacklem * Vnode op for VM getpages.
174191783Srmacklem */
175191783Srmacklemint
176191783Srmacklemncl_getpages(struct vop_getpages_args *ap)
177191783Srmacklem{
178191783Srmacklem	int i, error, nextoff, size, toff, count, npages;
179191783Srmacklem	struct uio uio;
180191783Srmacklem	struct iovec iov;
181191783Srmacklem	vm_offset_t kva;
182191783Srmacklem	struct buf *bp;
183191783Srmacklem	struct vnode *vp;
184191783Srmacklem	struct thread *td;
185191783Srmacklem	struct ucred *cred;
186191783Srmacklem	struct nfsmount *nmp;
187191783Srmacklem	vm_object_t object;
188191783Srmacklem	vm_page_t *pages;
189191783Srmacklem	struct nfsnode *np;
190191783Srmacklem
191191783Srmacklem	vp = ap->a_vp;
192191783Srmacklem	np = VTONFS(vp);
193191783Srmacklem	td = curthread;				/* XXX */
194191783Srmacklem	cred = curthread->td_ucred;		/* XXX */
195191783Srmacklem	nmp = VFSTONFS(vp->v_mount);
196191783Srmacklem	pages = ap->a_m;
197191783Srmacklem	count = ap->a_count;
198191783Srmacklem
199191783Srmacklem	if ((object = vp->v_object) == NULL) {
200191783Srmacklem		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
201194425Salc		return (VM_PAGER_ERROR);
202191783Srmacklem	}
203191783Srmacklem
204191783Srmacklem	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
205191783Srmacklem		mtx_lock(&np->n_mtx);
206191783Srmacklem		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
207191783Srmacklem			mtx_unlock(&np->n_mtx);
208191783Srmacklem			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
209194425Salc			return (VM_PAGER_ERROR);
210191783Srmacklem		} else
211191783Srmacklem			mtx_unlock(&np->n_mtx);
212191783Srmacklem	}
213191783Srmacklem
214191783Srmacklem	mtx_lock(&nmp->nm_mtx);
215191783Srmacklem	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
216191783Srmacklem	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
217191783Srmacklem		mtx_unlock(&nmp->nm_mtx);
218191783Srmacklem		/* We'll never get here for v4, because we always have fsinfo */
219191783Srmacklem		(void)ncl_fsinfo(nmp, vp, cred, td);
220191783Srmacklem	} else
221191783Srmacklem		mtx_unlock(&nmp->nm_mtx);
222191783Srmacklem
223191783Srmacklem	npages = btoc(count);
224191783Srmacklem
225191783Srmacklem	/*
226191783Srmacklem	 * If the requested page is partially valid, just return it and
227191783Srmacklem	 * allow the pager to zero-out the blanks.  Partially valid pages
228191783Srmacklem	 * can only occur at the file EOF.
229191783Srmacklem	 */
230194425Salc	VM_OBJECT_LOCK(object);
231194425Salc	if (pages[ap->a_reqpage]->valid != 0) {
232194425Salc		vm_page_lock_queues();
233194425Salc		for (i = 0; i < npages; ++i) {
234194425Salc			if (i != ap->a_reqpage)
235194425Salc				vm_page_free(pages[i]);
236191783Srmacklem		}
237194425Salc		vm_page_unlock_queues();
238191783Srmacklem		VM_OBJECT_UNLOCK(object);
239194425Salc		return (0);
240191783Srmacklem	}
241194425Salc	VM_OBJECT_UNLOCK(object);
242191783Srmacklem
243191783Srmacklem	/*
244191783Srmacklem	 * We use only the kva address for the buffer, but this is extremely
245191783Srmacklem	 * convenient and fast.
246191783Srmacklem	 */
247191783Srmacklem	bp = getpbuf(&ncl_pbuf_freecnt);
248191783Srmacklem
249191783Srmacklem	kva = (vm_offset_t) bp->b_data;
250191783Srmacklem	pmap_qenter(kva, pages, npages);
251191783Srmacklem	PCPU_INC(cnt.v_vnodein);
252191783Srmacklem	PCPU_ADD(cnt.v_vnodepgsin, npages);
253191783Srmacklem
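	/*
	 * Describe the mapped pages with a single iovec/uio and issue one
	 * read RPC that covers the entire request.
	 */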
254191783Srmacklem	iov.iov_base = (caddr_t) kva;
255191783Srmacklem	iov.iov_len = count;
256191783Srmacklem	uio.uio_iov = &iov;
257191783Srmacklem	uio.uio_iovcnt = 1;
258191783Srmacklem	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
259191783Srmacklem	uio.uio_resid = count;
260191783Srmacklem	uio.uio_segflg = UIO_SYSSPACE;
261191783Srmacklem	uio.uio_rw = UIO_READ;
262191783Srmacklem	uio.uio_td = td;
263191783Srmacklem
264191783Srmacklem	error = ncl_readrpc(vp, &uio, cred);
265191783Srmacklem	pmap_qremove(kva, npages);
266191783Srmacklem
267191783Srmacklem	relpbuf(bp, &ncl_pbuf_freecnt);
268191783Srmacklem
269191783Srmacklem	if (error && (uio.uio_resid == count)) {
270191783Srmacklem		ncl_printf("nfs_getpages: error %d\n", error);
271191783Srmacklem		VM_OBJECT_LOCK(object);
272191783Srmacklem		vm_page_lock_queues();
273191783Srmacklem		for (i = 0; i < npages; ++i) {
274191783Srmacklem			if (i != ap->a_reqpage)
275191783Srmacklem				vm_page_free(pages[i]);
276191783Srmacklem		}
277191783Srmacklem		vm_page_unlock_queues();
278191783Srmacklem		VM_OBJECT_UNLOCK(object);
279194425Salc		return (VM_PAGER_ERROR);
280191783Srmacklem	}
281191783Srmacklem
282191783Srmacklem	/*
283191783Srmacklem	 * Calculate the number of bytes read and validate only that number
284191783Srmacklem	 * of bytes.  Note that due to pending writes, size may be 0.  This
285191783Srmacklem	 * does not mean that the remaining data is invalid!
286191783Srmacklem	 */
287191783Srmacklem
288191783Srmacklem	size = count - uio.uio_resid;
289191783Srmacklem	VM_OBJECT_LOCK(object);
290191783Srmacklem	vm_page_lock_queues();
291191783Srmacklem	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
292191783Srmacklem		vm_page_t m;
293191783Srmacklem		nextoff = toff + PAGE_SIZE;
294191783Srmacklem		m = pages[i];
295191783Srmacklem
296191783Srmacklem		if (nextoff <= size) {
297191783Srmacklem			/*
298191783Srmacklem			 * Read operation filled an entire page
299191783Srmacklem			 */
300191783Srmacklem			m->valid = VM_PAGE_BITS_ALL;
301192065Srmacklem			KASSERT(m->dirty == 0,
302192065Srmacklem			    ("nfs_getpages: page %p is dirty", m));
303191783Srmacklem		} else if (size > toff) {
304191783Srmacklem			/*
305191783Srmacklem			 * Read operation filled a partial page.
306191783Srmacklem			 */
307191783Srmacklem			m->valid = 0;
308192231Srmacklem			vm_page_set_valid(m, 0, size - toff);
309192986Salc			KASSERT(m->dirty == 0,
310192231Srmacklem			    ("nfs_getpages: page %p is dirty", m));
311191783Srmacklem		} else {
312191783Srmacklem			/*
313191783Srmacklem			 * Read operation was short.  If no error occurred
314191783Srmacklem			 * we may have hit a zero-fill section.  We simply
315191783Srmacklem			 * leave valid set to 0.
316191783Srmacklem			 */
317191783Srmacklem			;
318191783Srmacklem		}
319191783Srmacklem		if (i != ap->a_reqpage) {
320191783Srmacklem			/*
321191783Srmacklem			 * Whether or not to leave the page activated is up in
322191783Srmacklem			 * the air, but we should put the page on a page queue
323191783Srmacklem			 * somewhere (it already is in the object).
324191783Srmacklem			 * Empirical results show that
325191783Srmacklem			 * deactivating pages is best.
326191783Srmacklem			 */
327191783Srmacklem
328191783Srmacklem			/*
329191783Srmacklem			 * Just in case someone was asking for this page we
330191783Srmacklem			 * now tell them that it is ok to use.
331191783Srmacklem			 */
332191783Srmacklem			if (!error) {
333191783Srmacklem				if (m->oflags & VPO_WANTED)
334191783Srmacklem					vm_page_activate(m);
335191783Srmacklem				else
336191783Srmacklem					vm_page_deactivate(m);
337191783Srmacklem				vm_page_wakeup(m);
338191783Srmacklem			} else {
339191783Srmacklem				vm_page_free(m);
340191783Srmacklem			}
341191783Srmacklem		}
342191783Srmacklem	}
343191783Srmacklem	vm_page_unlock_queues();
344191783Srmacklem	VM_OBJECT_UNLOCK(object);
345194425Salc	return (0);
346191783Srmacklem}
347191783Srmacklem
348191783Srmacklem/*
349191783Srmacklem * Vnode op for VM putpages.
350191783Srmacklem */
351191783Srmacklemint
352191783Srmacklemncl_putpages(struct vop_putpages_args *ap)
353191783Srmacklem{
354191783Srmacklem	struct uio uio;
355191783Srmacklem	struct iovec iov;
356191783Srmacklem	vm_offset_t kva;
357191783Srmacklem	struct buf *bp;
358191783Srmacklem	int iomode, must_commit, i, error, npages, count;
359191783Srmacklem	off_t offset;
360191783Srmacklem	int *rtvals;
361191783Srmacklem	struct vnode *vp;
362191783Srmacklem	struct thread *td;
363191783Srmacklem	struct ucred *cred;
364191783Srmacklem	struct nfsmount *nmp;
365191783Srmacklem	struct nfsnode *np;
366191783Srmacklem	vm_page_t *pages;
367191783Srmacklem
368191783Srmacklem	vp = ap->a_vp;
369191783Srmacklem	np = VTONFS(vp);
370191783Srmacklem	td = curthread;				/* XXX */
371191783Srmacklem	cred = curthread->td_ucred;		/* XXX */
372191783Srmacklem	nmp = VFSTONFS(vp->v_mount);
373191783Srmacklem	pages = ap->a_m;
374191783Srmacklem	count = ap->a_count;
375191783Srmacklem	rtvals = ap->a_rtvals;
376191783Srmacklem	npages = btoc(count);
377191783Srmacklem	offset = IDX_TO_OFF(pages[0]->pindex);
378191783Srmacklem
379191783Srmacklem	mtx_lock(&nmp->nm_mtx);
380191783Srmacklem	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
381191783Srmacklem	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
382191783Srmacklem		mtx_unlock(&nmp->nm_mtx);
383191783Srmacklem		(void)ncl_fsinfo(nmp, vp, cred, td);
384191783Srmacklem	} else
385191783Srmacklem		mtx_unlock(&nmp->nm_mtx);
386191783Srmacklem
387191783Srmacklem	mtx_lock(&np->n_mtx);
388191783Srmacklem	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
389191783Srmacklem	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
390191783Srmacklem		mtx_unlock(&np->n_mtx);
391191783Srmacklem		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
392191783Srmacklem		mtx_lock(&np->n_mtx);
393191783Srmacklem	}
394191783Srmacklem
395191783Srmacklem	for (i = 0; i < npages; i++)
396191783Srmacklem		rtvals[i] = VM_PAGER_AGAIN;
397191783Srmacklem
398191783Srmacklem	/*
399191783Srmacklem	 * When putting pages, do not extend file past EOF.
400191783Srmacklem	 */
401191783Srmacklem	if (offset + count > np->n_size) {
402191783Srmacklem		count = np->n_size - offset;
403191783Srmacklem		if (count < 0)
404191783Srmacklem			count = 0;
405191783Srmacklem	}
406191783Srmacklem	mtx_unlock(&np->n_mtx);
407191783Srmacklem
408191783Srmacklem	/*
409191783Srmacklem	 * We use only the kva address for the buffer, but this is extremely
410191783Srmacklem	 * convenient and fast.
411191783Srmacklem	 */
412191783Srmacklem	bp = getpbuf(&ncl_pbuf_freecnt);
413191783Srmacklem
414191783Srmacklem	kva = (vm_offset_t) bp->b_data;
415191783Srmacklem	pmap_qenter(kva, pages, npages);
416191783Srmacklem	PCPU_INC(cnt.v_vnodeout);
417191783Srmacklem	PCPU_ADD(cnt.v_vnodepgsout, count);
418191783Srmacklem
419191783Srmacklem	iov.iov_base = (caddr_t) kva;
420191783Srmacklem	iov.iov_len = count;
421191783Srmacklem	uio.uio_iov = &iov;
422191783Srmacklem	uio.uio_iovcnt = 1;
423191783Srmacklem	uio.uio_offset = offset;
424191783Srmacklem	uio.uio_resid = count;
425191783Srmacklem	uio.uio_segflg = UIO_SYSSPACE;
426191783Srmacklem	uio.uio_rw = UIO_WRITE;
427191783Srmacklem	uio.uio_td = td;
428191783Srmacklem
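	/*
	 * Pick the write mode: an unstable write lets the server reply
	 * before committing the data (a later commit is required), while
	 * FILESYNC is used when the pager asks for a synchronous putpages.
	 */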
429191783Srmacklem	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
430191783Srmacklem	    iomode = NFSWRITE_UNSTABLE;
431191783Srmacklem	else
432191783Srmacklem	    iomode = NFSWRITE_FILESYNC;
433191783Srmacklem
434191783Srmacklem	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit);
435191783Srmacklem
436191783Srmacklem	pmap_qremove(kva, npages);
437191783Srmacklem	relpbuf(bp, &ncl_pbuf_freecnt);
438191783Srmacklem
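	/*
	 * On success, report VM_PAGER_OK and clear the dirty bit for each
	 * page the RPC actually wrote; any remaining pages keep the
	 * VM_PAGER_AGAIN value set above so the pager will retry them.
	 */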
439191783Srmacklem	if (!error) {
440191783Srmacklem		int nwritten = round_page(count - uio.uio_resid) / PAGE_SIZE;
441191783Srmacklem		for (i = 0; i < nwritten; i++) {
442191783Srmacklem			rtvals[i] = VM_PAGER_OK;
443191783Srmacklem			vm_page_undirty(pages[i]);
444191783Srmacklem		}
445191783Srmacklem		if (must_commit) {
446191783Srmacklem			ncl_clearcommit(vp->v_mount);
447191783Srmacklem		}
448191783Srmacklem	}
449191783Srmacklem	return rtvals[0];
450191783Srmacklem}
451191783Srmacklem
452191783Srmacklem/*
453191783Srmacklem * For nfs, cache consistency can only be maintained approximately.
454191783Srmacklem * Although RFC1094 does not specify the criteria, the following is
455191783Srmacklem * believed to be compatible with the reference port.
456191783Srmacklem * For nfs:
457191783Srmacklem * If the file's modify time on the server has changed since the
458191783Srmacklem * last read rpc or you have written to the file,
459191783Srmacklem * you may have lost data cache consistency with the
460191783Srmacklem * server, so flush all of the file's data out of the cache.
461191783Srmacklem * Then force a getattr rpc to ensure that you have up to date
462191783Srmacklem * attributes.
463191783Srmacklem * NB: This implies that cache data can be read when up to
464191783Srmacklem * NFS_ATTRTIMEO seconds out of date. If you find that you need current
465191783Srmacklem * attributes this could be forced by setting n_attrstamp to 0 before
466191783Srmacklem * the VOP_GETATTR() call.
467191783Srmacklem */
468191783Srmacklemstatic inline int
469191783Srmacklemnfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
470191783Srmacklem{
471191783Srmacklem	int error = 0;
472191783Srmacklem	struct vattr vattr;
473191783Srmacklem	struct nfsnode *np = VTONFS(vp);
474191783Srmacklem	int old_lock;
475191783Srmacklem
476191783Srmacklem	/*
477191783Srmacklem	 * Grab the exclusive lock before checking whether the cache is
478191783Srmacklem	 * consistent.
479191783Srmacklem	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
480191783Srmacklem	 * But for now, this suffices.
481191783Srmacklem	 */
482191783Srmacklem	old_lock = ncl_upgrade_vnlock(vp);
483193955Srmacklem	if (vp->v_iflag & VI_DOOMED) {
484193955Srmacklem		ncl_downgrade_vnlock(vp, old_lock);
485193955Srmacklem		return (EBADF);
486193955Srmacklem	}
487193955Srmacklem
488191783Srmacklem	mtx_lock(&np->n_mtx);
489191783Srmacklem	if (np->n_flag & NMODIFIED) {
490191783Srmacklem		mtx_unlock(&np->n_mtx);
491191783Srmacklem		if (vp->v_type != VREG) {
492191783Srmacklem			if (vp->v_type != VDIR)
493191783Srmacklem				panic("nfs: bioread, not dir");
494191783Srmacklem			ncl_invaldir(vp);
495191783Srmacklem			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
496191783Srmacklem			if (error)
497191783Srmacklem				goto out;
498191783Srmacklem		}
499191783Srmacklem		np->n_attrstamp = 0;
500191783Srmacklem		error = VOP_GETATTR(vp, &vattr, cred);
501191783Srmacklem		if (error)
502191783Srmacklem			goto out;
503191783Srmacklem		mtx_lock(&np->n_mtx);
504191783Srmacklem		np->n_mtime = vattr.va_mtime;
505191783Srmacklem		mtx_unlock(&np->n_mtx);
506191783Srmacklem	} else {
507191783Srmacklem		mtx_unlock(&np->n_mtx);
508191783Srmacklem		error = VOP_GETATTR(vp, &vattr, cred);
509191783Srmacklem		if (error)
510191783Srmacklem			return (error);
511191783Srmacklem		mtx_lock(&np->n_mtx);
512191783Srmacklem		if ((np->n_flag & NSIZECHANGED)
513191783Srmacklem		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
514191783Srmacklem			mtx_unlock(&np->n_mtx);
515191783Srmacklem			if (vp->v_type == VDIR)
516191783Srmacklem				ncl_invaldir(vp);
517191783Srmacklem			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
518191783Srmacklem			if (error)
519191783Srmacklem				goto out;
520191783Srmacklem			mtx_lock(&np->n_mtx);
521191783Srmacklem			np->n_mtime = vattr.va_mtime;
522191783Srmacklem			np->n_flag &= ~NSIZECHANGED;
523191783Srmacklem		}
524191783Srmacklem		mtx_unlock(&np->n_mtx);
525191783Srmacklem	}
526191783Srmacklemout:
527191783Srmacklem	ncl_downgrade_vnlock(vp, old_lock);
528191783Srmacklem	return error;
529191783Srmacklem}
530191783Srmacklem
531191783Srmacklem/*
532191783Srmacklem * Vnode op for read using bio
533191783Srmacklem */
534191783Srmacklemint
535191783Srmacklemncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
536191783Srmacklem{
537191783Srmacklem	struct nfsnode *np = VTONFS(vp);
538191783Srmacklem	int biosize, i;
539191783Srmacklem	struct buf *bp, *rabp;
540191783Srmacklem	struct thread *td;
541191783Srmacklem	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
542191783Srmacklem	daddr_t lbn, rabn;
543191783Srmacklem	int bcount;
544191783Srmacklem	int seqcount;
545191783Srmacklem	int nra, error = 0, n = 0, on = 0;
546191783Srmacklem
547191783Srmacklem#ifdef DIAGNOSTIC
548191783Srmacklem	if (uio->uio_rw != UIO_READ)
549191783Srmacklem		panic("ncl_read mode");
550191783Srmacklem#endif
551191783Srmacklem	if (uio->uio_resid == 0)
552191783Srmacklem		return (0);
553191783Srmacklem	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
554191783Srmacklem		return (EINVAL);
555191783Srmacklem	td = uio->uio_td;
556191783Srmacklem
557191783Srmacklem	mtx_lock(&nmp->nm_mtx);
558191783Srmacklem	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
559191783Srmacklem	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
560191783Srmacklem		mtx_unlock(&nmp->nm_mtx);
561191783Srmacklem		(void)ncl_fsinfo(nmp, vp, cred, td);
562191783Srmacklem		mtx_lock(&nmp->nm_mtx);
563191783Srmacklem	}
564191783Srmacklem	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
565191783Srmacklem		(void) newnfs_iosize(nmp);
566191783Srmacklem	mtx_unlock(&nmp->nm_mtx);
567191783Srmacklem
568191783Srmacklem	if (vp->v_type != VDIR &&
569191783Srmacklem	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
570191783Srmacklem		return (EFBIG);
571191783Srmacklem
572191783Srmacklem	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
573191783Srmacklem		/* No caching or readaheads; just read data into the user buffer. */
574191783Srmacklem		return ncl_readrpc(vp, uio, cred);
575191783Srmacklem
576191783Srmacklem	biosize = vp->v_mount->mnt_stat.f_iosize;
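	/*
	 * The upper bits of ioflag carry the caller's sequential access
	 * heuristic; convert it from BKVASIZE units into a count of biosize
	 * blocks, which bounds the read-aheads issued below.
	 */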
577191783Srmacklem	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);
578191783Srmacklem
579191783Srmacklem	error = nfs_bioread_check_cons(vp, td, cred);
580191783Srmacklem	if (error)
581191783Srmacklem		return error;
582191783Srmacklem
583191783Srmacklem	do {
584191783Srmacklem	    u_quad_t nsize;
585191783Srmacklem
586191783Srmacklem	    mtx_lock(&np->n_mtx);
587191783Srmacklem	    nsize = np->n_size;
588191783Srmacklem	    mtx_unlock(&np->n_mtx);
589191783Srmacklem
590191783Srmacklem	    switch (vp->v_type) {
591191783Srmacklem	    case VREG:
592191783Srmacklem		NFSINCRGLOBAL(newnfsstats.biocache_reads);
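		/*
		 * lbn is the logical block containing the current offset
		 * and on is the byte offset within that block.
		 */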
593191783Srmacklem		lbn = uio->uio_offset / biosize;
594191783Srmacklem		on = uio->uio_offset & (biosize - 1);
595191783Srmacklem
596191783Srmacklem		/*
597191783Srmacklem		 * Start the read ahead(s), as required.
598191783Srmacklem		 */
599191783Srmacklem		if (nmp->nm_readahead > 0) {
600191783Srmacklem		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
601191783Srmacklem			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
602191783Srmacklem			rabn = lbn + 1 + nra;
603191783Srmacklem			if (incore(&vp->v_bufobj, rabn) == NULL) {
604191783Srmacklem			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
605191783Srmacklem			    if (!rabp) {
606191783Srmacklem				error = newnfs_sigintr(nmp, td);
607191783Srmacklem				if (error)
608191783Srmacklem				    return (error);
609191783Srmacklem				else
610191783Srmacklem				    break;
611191783Srmacklem			    }
612191783Srmacklem			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
613191783Srmacklem				rabp->b_flags |= B_ASYNC;
614191783Srmacklem				rabp->b_iocmd = BIO_READ;
615191783Srmacklem				vfs_busy_pages(rabp, 0);
616191783Srmacklem				if (ncl_asyncio(nmp, rabp, cred, td)) {
617191783Srmacklem				    rabp->b_flags |= B_INVAL;
618191783Srmacklem				    rabp->b_ioflags |= BIO_ERROR;
619191783Srmacklem				    vfs_unbusy_pages(rabp);
620191783Srmacklem				    brelse(rabp);
621191783Srmacklem				    break;
622191783Srmacklem				}
623191783Srmacklem			    } else {
624191783Srmacklem				brelse(rabp);
625191783Srmacklem			    }
626191783Srmacklem			}
627191783Srmacklem		    }
628191783Srmacklem		}
629191783Srmacklem
630191783Srmacklem		/* Note that bcount is *not* DEV_BSIZE aligned. */
631191783Srmacklem		bcount = biosize;
632191783Srmacklem		if ((off_t)lbn * biosize >= nsize) {
633191783Srmacklem			bcount = 0;
634191783Srmacklem		} else if ((off_t)(lbn + 1) * biosize > nsize) {
635191783Srmacklem			bcount = nsize - (off_t)lbn * biosize;
636191783Srmacklem		}
637191783Srmacklem		bp = nfs_getcacheblk(vp, lbn, bcount, td);
638191783Srmacklem
639191783Srmacklem		if (!bp) {
640191783Srmacklem			error = newnfs_sigintr(nmp, td);
641191783Srmacklem			return (error ? error : EINTR);
642191783Srmacklem		}
643191783Srmacklem
644191783Srmacklem		/*
645191783Srmacklem		 * If B_CACHE is not set, we must issue the read.  If this
646191783Srmacklem		 * fails, we return an error.
647191783Srmacklem		 */
648191783Srmacklem
649191783Srmacklem		if ((bp->b_flags & B_CACHE) == 0) {
650191783Srmacklem		    bp->b_iocmd = BIO_READ;
651191783Srmacklem		    vfs_busy_pages(bp, 0);
652191783Srmacklem		    error = ncl_doio(vp, bp, cred, td);
653191783Srmacklem		    if (error) {
654191783Srmacklem			brelse(bp);
655191783Srmacklem			return (error);
656191783Srmacklem		    }
657191783Srmacklem		}
658191783Srmacklem
659191783Srmacklem		/*
660191783Srmacklem		 * on is the offset into the current bp.  Figure out how many
661191783Srmacklem		 * bytes we can copy out of the bp.  Note that bcount is
662191783Srmacklem		 * NOT DEV_BSIZE aligned.
663191783Srmacklem		 *
664191783Srmacklem		 * Then figure out how many bytes we can copy into the uio.
665191783Srmacklem		 */
666191783Srmacklem
667191783Srmacklem		n = 0;
668191783Srmacklem		if (on < bcount)
669191783Srmacklem			n = min((unsigned)(bcount - on), uio->uio_resid);
670191783Srmacklem		break;
671191783Srmacklem	    case VLNK:
672191783Srmacklem		NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
673191783Srmacklem		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
674191783Srmacklem		if (!bp) {
675191783Srmacklem			error = newnfs_sigintr(nmp, td);
676191783Srmacklem			return (error ? error : EINTR);
677191783Srmacklem		}
678191783Srmacklem		if ((bp->b_flags & B_CACHE) == 0) {
679191783Srmacklem		    bp->b_iocmd = BIO_READ;
680191783Srmacklem		    vfs_busy_pages(bp, 0);
681191783Srmacklem		    error = ncl_doio(vp, bp, cred, td);
682191783Srmacklem		    if (error) {
683191783Srmacklem			bp->b_ioflags |= BIO_ERROR;
684191783Srmacklem			brelse(bp);
685191783Srmacklem			return (error);
686191783Srmacklem		    }
687191783Srmacklem		}
688191783Srmacklem		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
689191783Srmacklem		on = 0;
690191783Srmacklem		break;
691191783Srmacklem	    case VDIR:
692191783Srmacklem		NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
693191783Srmacklem		if (np->n_direofoffset
694191783Srmacklem		    && uio->uio_offset >= np->n_direofoffset) {
695191783Srmacklem		    return (0);
696191783Srmacklem		}
697191783Srmacklem		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
698191783Srmacklem		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
699191783Srmacklem		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
700191783Srmacklem		if (!bp) {
701191783Srmacklem		    error = newnfs_sigintr(nmp, td);
702191783Srmacklem		    return (error ? error : EINTR);
703191783Srmacklem		}
704191783Srmacklem		if ((bp->b_flags & B_CACHE) == 0) {
705191783Srmacklem		    bp->b_iocmd = BIO_READ;
706191783Srmacklem		    vfs_busy_pages(bp, 0);
707191783Srmacklem		    error = ncl_doio(vp, bp, cred, td);
708191783Srmacklem		    if (error) {
709191783Srmacklem			    brelse(bp);
710191783Srmacklem		    }
711191783Srmacklem		    while (error == NFSERR_BAD_COOKIE) {
712191783Srmacklem			ncl_invaldir(vp);
713191783Srmacklem			error = ncl_vinvalbuf(vp, 0, td, 1);
714191783Srmacklem			/*
715191783Srmacklem			 * Yuck! The directory has been modified on the
716191783Srmacklem			 * server. The only way to get the block is by
717191783Srmacklem			 * reading from the beginning to get all the
718191783Srmacklem			 * offset cookies.
719191783Srmacklem			 *
720191783Srmacklem			 * Leave the last bp intact unless there is an error.
721191783Srmacklem			 * Loop back up to the while if the error is another
722191783Srmacklem			 * NFSERR_BAD_COOKIE (double yuck!).
723191783Srmacklem			 */
724191783Srmacklem			for (i = 0; i <= lbn && !error; i++) {
725191783Srmacklem			    if (np->n_direofoffset
726191783Srmacklem				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
727191783Srmacklem				    return (0);
728191783Srmacklem			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
729191783Srmacklem			    if (!bp) {
730191783Srmacklem				error = newnfs_sigintr(nmp, td);
731191783Srmacklem				return (error ? error : EINTR);
732191783Srmacklem			    }
733191783Srmacklem			    if ((bp->b_flags & B_CACHE) == 0) {
734191783Srmacklem				    bp->b_iocmd = BIO_READ;
735191783Srmacklem				    vfs_busy_pages(bp, 0);
736191783Srmacklem				    error = ncl_doio(vp, bp, cred, td);
737191783Srmacklem				    /*
738191783Srmacklem				     * no error + B_INVAL == directory EOF,
739191783Srmacklem				     * use the block.
740191783Srmacklem				     */
741191783Srmacklem				    if (error == 0 && (bp->b_flags & B_INVAL))
742191783Srmacklem					    break;
743191783Srmacklem			    }
744191783Srmacklem			    /*
745191783Srmacklem			     * An error will throw away the block and the
746191783Srmacklem			     * for loop will break out.  If no error and this
747191783Srmacklem			     * is not the block we want, we throw away the
748191783Srmacklem			     * block and go for the next one via the for loop.
749191783Srmacklem			     */
750191783Srmacklem			    if (error || i < lbn)
751191783Srmacklem				    brelse(bp);
752191783Srmacklem			}
753191783Srmacklem		    }
754191783Srmacklem		    /*
755191783Srmacklem		     * The above while is repeated if we hit another cookie
756191783Srmacklem		     * error.  If we hit an error and it wasn't a cookie error,
757191783Srmacklem		     * we give up.
758191783Srmacklem		     */
759191783Srmacklem		    if (error)
760191783Srmacklem			    return (error);
761191783Srmacklem		}
762191783Srmacklem
763191783Srmacklem		/*
764191783Srmacklem		 * If not eof and read aheads are enabled, start one.
765191783Srmacklem		 * (You need the current block first, so that you have the
766191783Srmacklem		 *  directory offset cookie of the next block.)
767191783Srmacklem		 */
768191783Srmacklem		if (nmp->nm_readahead > 0 &&
769191783Srmacklem		    (bp->b_flags & B_INVAL) == 0 &&
770191783Srmacklem		    (np->n_direofoffset == 0 ||
771191783Srmacklem		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
772191783Srmacklem		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
773191783Srmacklem			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
774191783Srmacklem			if (rabp) {
775191783Srmacklem			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
776191783Srmacklem				rabp->b_flags |= B_ASYNC;
777191783Srmacklem				rabp->b_iocmd = BIO_READ;
778191783Srmacklem				vfs_busy_pages(rabp, 0);
779191783Srmacklem				if (ncl_asyncio(nmp, rabp, cred, td)) {
780191783Srmacklem				    rabp->b_flags |= B_INVAL;
781191783Srmacklem				    rabp->b_ioflags |= BIO_ERROR;
782191783Srmacklem				    vfs_unbusy_pages(rabp);
783191783Srmacklem				    brelse(rabp);
784191783Srmacklem				}
785191783Srmacklem			    } else {
786191783Srmacklem				brelse(rabp);
787191783Srmacklem			    }
788191783Srmacklem			}
789191783Srmacklem		}
790191783Srmacklem		/*
791191783Srmacklem		 * Unlike VREG files, whose buffer size (bp->b_bcount) is
792191783Srmacklem		 * chopped for the EOF condition, we cannot tell how large
793191783Srmacklem		 * NFS directories are going to be until we hit EOF.  So
794191783Srmacklem		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
795191783Srmacklem		 * it just so happens that b_resid will effectively chop it
796191783Srmacklem		 * to EOF.  *BUT* this information is lost if the buffer goes
797191783Srmacklem		 * away and is reconstituted into a B_CACHE state ( due to
798191783Srmacklem		 * being VMIO ) later.  So we keep track of the directory eof
799191783Srmacklem		 * in np->n_direofoffset and chop it off as an extra step
800191783Srmacklem		 * right here.
801191783Srmacklem		 */
802191783Srmacklem		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
803191783Srmacklem		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
804191783Srmacklem			n = np->n_direofoffset - uio->uio_offset;
805191783Srmacklem		break;
806191783Srmacklem	    default:
807191783Srmacklem		ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
808191783Srmacklem		bp = NULL;
809191783Srmacklem		break;
810191783Srmacklem	    }
811191783Srmacklem
812191783Srmacklem	    if (n > 0) {
813191783Srmacklem		    error = uiomove(bp->b_data + on, (int)n, uio);
814191783Srmacklem	    }
815191783Srmacklem	    if (vp->v_type == VLNK)
816191783Srmacklem		n = 0;
817191783Srmacklem	    if (bp != NULL)
818191783Srmacklem		brelse(bp);
819191783Srmacklem	} while (error == 0 && uio->uio_resid > 0 && n > 0);
820191783Srmacklem	return (error);
821191783Srmacklem}
822191783Srmacklem
823191783Srmacklem/*
824191783Srmacklem * The NFS write path cannot handle iovecs with len > 1. So we need to
825191783Srmacklem * break up iovecs accordingly (restricting them to wsize).
826191783Srmacklem * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
827191783Srmacklem * For the ASYNC case, 2 copies are needed. The first a copy from the
828191783Srmacklem * user buffer to a staging buffer and then a second copy from the staging
829191783Srmacklem * buffer to mbufs. This can be optimized by copying from the user buffer
830191783Srmacklem * directly into mbufs and passing the chain down, but that requires a
831191783Srmacklem * fair amount of re-working of the relevant codepaths (and can be done
832191783Srmacklem * later).
833191783Srmacklem */
834191783Srmacklemstatic int
835191783Srmacklemnfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
836191783Srmacklem    int ioflag)
837191783Srmacklem{
841191783Srmacklem	int error;
842191783Srmacklem	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
843191783Srmacklem	struct thread *td = uiop->uio_td;
844191783Srmacklem	int size;
845191783Srmacklem	int wsize;
846191783Srmacklem
847191783Srmacklem	mtx_lock(&nmp->nm_mtx);
848191783Srmacklem	wsize = nmp->nm_wsize;
849191783Srmacklem	mtx_unlock(&nmp->nm_mtx);
850191783Srmacklem	if (ioflag & IO_SYNC) {
851191783Srmacklem		int iomode, must_commit;
852191783Srmacklem		struct uio uio;
853191783Srmacklem		struct iovec iov;
854191783Srmacklemdo_sync:
855191783Srmacklem		while (uiop->uio_resid > 0) {
856191783Srmacklem			size = min(uiop->uio_resid, wsize);
857191783Srmacklem			size = min(uiop->uio_iov->iov_len, size);
858191783Srmacklem			iov.iov_base = uiop->uio_iov->iov_base;
859191783Srmacklem			iov.iov_len = size;
860191783Srmacklem			uio.uio_iov = &iov;
861191783Srmacklem			uio.uio_iovcnt = 1;
862191783Srmacklem			uio.uio_offset = uiop->uio_offset;
863191783Srmacklem			uio.uio_resid = size;
864191783Srmacklem			uio.uio_segflg = UIO_USERSPACE;
865191783Srmacklem			uio.uio_rw = UIO_WRITE;
866191783Srmacklem			uio.uio_td = td;
867191783Srmacklem			iomode = NFSWRITE_FILESYNC;
868191783Srmacklem			error = ncl_writerpc(vp, &uio, cred, &iomode,
869191783Srmacklem			    &must_commit);
870191783Srmacklem			KASSERT((must_commit == 0),
871191783Srmacklem				("ncl_directio_write: Did not commit write"));
872191783Srmacklem			if (error)
873191783Srmacklem				return (error);
874191783Srmacklem			uiop->uio_offset += size;
875191783Srmacklem			uiop->uio_resid -= size;
876191783Srmacklem			if (uiop->uio_iov->iov_len <= size) {
877191783Srmacklem				uiop->uio_iovcnt--;
878191783Srmacklem				uiop->uio_iov++;
879191783Srmacklem			} else {
880191783Srmacklem				uiop->uio_iov->iov_base =
881191783Srmacklem					(char *)uiop->uio_iov->iov_base + size;
882191783Srmacklem				uiop->uio_iov->iov_len -= size;
883191783Srmacklem			}
884191783Srmacklem		}
885191783Srmacklem	} else {
886191783Srmacklem		struct uio *t_uio;
887191783Srmacklem		struct iovec *t_iov;
888191783Srmacklem		struct buf *bp;
889191783Srmacklem
890191783Srmacklem		/*
891191783Srmacklem		 * Break up the write into blocksize chunks and hand these
892191783Srmacklem		 * over to nfsiod's for write back.
893191783Srmacklem		 * Unfortunately, this incurs a copy of the data, since
894191783Srmacklem		 * the user could modify the buffer before the write is
895191783Srmacklem		 * initiated.
896191783Srmacklem		 *
897191783Srmacklem		 * The obvious optimization here is that one of the 2 copies
898191783Srmacklem		 * in the async write path can be eliminated by copying the
899191783Srmacklem		 * data here directly into mbufs and passing the mbuf chain
900191783Srmacklem		 * down. But that will require a fair amount of re-working
901191783Srmacklem		 * of the code and can be done if there's enough interest
902191783Srmacklem		 * in NFS directio access.
903191783Srmacklem		 */
904191783Srmacklem		while (uiop->uio_resid > 0) {
905191783Srmacklem			size = min(uiop->uio_resid, wsize);
906191783Srmacklem			size = min(uiop->uio_iov->iov_len, size);
907191783Srmacklem			bp = getpbuf(&ncl_pbuf_freecnt);
908191783Srmacklem			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
909191783Srmacklem			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
910191783Srmacklem			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
911191783Srmacklem			t_iov->iov_len = size;
912191783Srmacklem			t_uio->uio_iov = t_iov;
913191783Srmacklem			t_uio->uio_iovcnt = 1;
914191783Srmacklem			t_uio->uio_offset = uiop->uio_offset;
915191783Srmacklem			t_uio->uio_resid = size;
916191783Srmacklem			t_uio->uio_segflg = UIO_SYSSPACE;
917191783Srmacklem			t_uio->uio_rw = UIO_WRITE;
918191783Srmacklem			t_uio->uio_td = td;
919191783Srmacklem			bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
920191783Srmacklem			bp->b_flags |= B_DIRECT;
921191783Srmacklem			bp->b_iocmd = BIO_WRITE;
922191783Srmacklem			if (cred != NOCRED) {
923191783Srmacklem				crhold(cred);
924191783Srmacklem				bp->b_wcred = cred;
925191783Srmacklem			} else
926191783Srmacklem				bp->b_wcred = NOCRED;
927191783Srmacklem			bp->b_caller1 = (void *)t_uio;
928191783Srmacklem			bp->b_vp = vp;
929191783Srmacklem			error = ncl_asyncio(nmp, bp, NOCRED, td);
930191783Srmacklem			if (error) {
931191783Srmacklem				free(t_iov->iov_base, M_NFSDIRECTIO);
932191783Srmacklem				free(t_iov, M_NFSDIRECTIO);
933191783Srmacklem				free(t_uio, M_NFSDIRECTIO);
934191783Srmacklem				bp->b_vp = NULL;
935191783Srmacklem				relpbuf(bp, &ncl_pbuf_freecnt);
936191783Srmacklem				if (error == EINTR)
937191783Srmacklem					return (error);
938191783Srmacklem				goto do_sync;
939191783Srmacklem			}
940191783Srmacklem			uiop->uio_offset += size;
941191783Srmacklem			uiop->uio_resid -= size;
942191783Srmacklem			if (uiop->uio_iov->iov_len <= size) {
943191783Srmacklem				uiop->uio_iovcnt--;
944191783Srmacklem				uiop->uio_iov++;
945191783Srmacklem			} else {
946191783Srmacklem				uiop->uio_iov->iov_base =
947191783Srmacklem					(char *)uiop->uio_iov->iov_base + size;
948191783Srmacklem				uiop->uio_iov->iov_len -= size;
949191783Srmacklem			}
950191783Srmacklem		}
951191783Srmacklem	}
952191783Srmacklem	return (0);
953191783Srmacklem}
954191783Srmacklem
955191783Srmacklem/*
956191783Srmacklem * Vnode op for write using bio
957191783Srmacklem */
958191783Srmacklemint
959191783Srmacklemncl_write(struct vop_write_args *ap)
960191783Srmacklem{
961191783Srmacklem	int biosize;
962191783Srmacklem	struct uio *uio = ap->a_uio;
963191783Srmacklem	struct thread *td = uio->uio_td;
964191783Srmacklem	struct vnode *vp = ap->a_vp;
965191783Srmacklem	struct nfsnode *np = VTONFS(vp);
966191783Srmacklem	struct ucred *cred = ap->a_cred;
967191783Srmacklem	int ioflag = ap->a_ioflag;
968191783Srmacklem	struct buf *bp;
969191783Srmacklem	struct vattr vattr;
970191783Srmacklem	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
971191783Srmacklem	daddr_t lbn;
972191783Srmacklem	int bcount;
973191783Srmacklem	int n, on, error = 0;
974191783Srmacklem	struct proc *p = td?td->td_proc:NULL;
975191783Srmacklem
976191783Srmacklem#ifdef DIAGNOSTIC
977191783Srmacklem	if (uio->uio_rw != UIO_WRITE)
978191783Srmacklem		panic("ncl_write mode");
979191783Srmacklem	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_td != curthread)
980191783Srmacklem		panic("ncl_write proc");
981191783Srmacklem#endif
982191783Srmacklem	if (vp->v_type != VREG)
983191783Srmacklem		return (EIO);
984191783Srmacklem	mtx_lock(&np->n_mtx);
985191783Srmacklem	if (np->n_flag & NWRITEERR) {
986191783Srmacklem		np->n_flag &= ~NWRITEERR;
987191783Srmacklem		mtx_unlock(&np->n_mtx);
988191783Srmacklem		return (np->n_error);
989191783Srmacklem	} else
990191783Srmacklem		mtx_unlock(&np->n_mtx);
991191783Srmacklem	mtx_lock(&nmp->nm_mtx);
992191783Srmacklem	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
993191783Srmacklem	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
994191783Srmacklem		mtx_unlock(&nmp->nm_mtx);
995191783Srmacklem		(void)ncl_fsinfo(nmp, vp, cred, td);
996191783Srmacklem		mtx_lock(&nmp->nm_mtx);
997191783Srmacklem	}
998191783Srmacklem	if (nmp->nm_wsize == 0)
999191783Srmacklem		(void) newnfs_iosize(nmp);
1000191783Srmacklem	mtx_unlock(&nmp->nm_mtx);
1001191783Srmacklem
1002191783Srmacklem	/*
1003191783Srmacklem	 * Synchronously flush pending buffers if we are in synchronous
1004191783Srmacklem	 * mode or if we are appending.
1005191783Srmacklem	 */
1006191783Srmacklem	if (ioflag & (IO_APPEND | IO_SYNC)) {
1007191783Srmacklem		mtx_lock(&np->n_mtx);
1008191783Srmacklem		if (np->n_flag & NMODIFIED) {
1009191783Srmacklem			mtx_unlock(&np->n_mtx);
1010191783Srmacklem#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
1011191783Srmacklem			/*
1012191783Srmacklem			 * Require non-blocking, synchronous writes to
1013191783Srmacklem			 * dirty files to inform the program it needs
1014191783Srmacklem			 * to fsync(2) explicitly.
1015191783Srmacklem			 */
1016191783Srmacklem			if (ioflag & IO_NDELAY)
1017191783Srmacklem				return (EAGAIN);
1018191783Srmacklem#endif
1019191783Srmacklemflush_and_restart:
1020191783Srmacklem			np->n_attrstamp = 0;
1021191783Srmacklem			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
1022191783Srmacklem			if (error)
1023191783Srmacklem				return (error);
1024191783Srmacklem		} else
1025191783Srmacklem			mtx_unlock(&np->n_mtx);
1026191783Srmacklem	}
1027191783Srmacklem
1028191783Srmacklem	/*
1029191783Srmacklem	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
1030191783Srmacklem	 * get the append lock.
1031191783Srmacklem	 */
1032191783Srmacklem	if (ioflag & IO_APPEND) {
1033191783Srmacklem		np->n_attrstamp = 0;
1034191783Srmacklem		error = VOP_GETATTR(vp, &vattr, cred);
1035191783Srmacklem		if (error)
1036191783Srmacklem			return (error);
1037191783Srmacklem		mtx_lock(&np->n_mtx);
1038191783Srmacklem		uio->uio_offset = np->n_size;
1039191783Srmacklem		mtx_unlock(&np->n_mtx);
1040191783Srmacklem	}
1041191783Srmacklem
1042191783Srmacklem	if (uio->uio_offset < 0)
1043191783Srmacklem		return (EINVAL);
1044191783Srmacklem	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
1045191783Srmacklem		return (EFBIG);
1046191783Srmacklem	if (uio->uio_resid == 0)
1047191783Srmacklem		return (0);
1048191783Srmacklem
1049191783Srmacklem	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
1050191783Srmacklem		return nfs_directio_write(vp, uio, cred, ioflag);
1051191783Srmacklem
1052191783Srmacklem	/*
1053191783Srmacklem	 * Maybe this should be above the vnode op call, but so long as
1054191783Srmacklem	 * file servers have no limits, I don't think it matters.
1055191783Srmacklem	 */
1056191783Srmacklem	if (p != NULL) {
1057191783Srmacklem		PROC_LOCK(p);
1058191783Srmacklem		if (uio->uio_offset + uio->uio_resid >
1059191783Srmacklem		    lim_cur(p, RLIMIT_FSIZE)) {
1060191783Srmacklem			psignal(p, SIGXFSZ);
1061191783Srmacklem			PROC_UNLOCK(p);
1062191783Srmacklem			return (EFBIG);
1063191783Srmacklem		}
1064191783Srmacklem		PROC_UNLOCK(p);
1065191783Srmacklem	}
1066191783Srmacklem
1067191783Srmacklem	biosize = vp->v_mount->mnt_stat.f_iosize;
1068191783Srmacklem	/*
1069191783Srmacklem	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
1070191783Srmacklem	 * would exceed the local maximum per-file write commit size when
1071191783Srmacklem	 * combined with those, we must decide whether to flush,
1072191783Srmacklem	 * go synchronous, or return error.  We don't bother checking
1073191783Srmacklem	 * IO_UNIT -- we just make all writes atomic anyway, as there's
1074191783Srmacklem	 * no point optimizing for something that really won't ever happen.
1075191783Srmacklem	 */
1076191783Srmacklem	if (!(ioflag & IO_SYNC)) {
1077191783Srmacklem		int nflag;
1078191783Srmacklem
1079191783Srmacklem		mtx_lock(&np->n_mtx);
1080191783Srmacklem		nflag = np->n_flag;
1081191783Srmacklem		mtx_unlock(&np->n_mtx);
1082191783Srmacklem		int needrestart = 0;
1083191783Srmacklem		if (nmp->nm_wcommitsize < uio->uio_resid) {
1084191783Srmacklem			/*
1085191783Srmacklem			 * If this request could not possibly be completed
1086191783Srmacklem			 * without exceeding the maximum outstanding write
1087191783Srmacklem			 * commit size, see if we can convert it into a
1088191783Srmacklem			 * synchronous write operation.
1089191783Srmacklem			 */
1090191783Srmacklem			if (ioflag & IO_NDELAY)
1091191783Srmacklem				return (EAGAIN);
1092191783Srmacklem			ioflag |= IO_SYNC;
1093191783Srmacklem			if (nflag & NMODIFIED)
1094191783Srmacklem				needrestart = 1;
1095191783Srmacklem		} else if (nflag & NMODIFIED) {
1096191783Srmacklem			int wouldcommit = 0;
1097191783Srmacklem			BO_LOCK(&vp->v_bufobj);
1098191783Srmacklem			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
1099191783Srmacklem				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
1100191783Srmacklem				    b_bobufs) {
1101191783Srmacklem					if (bp->b_flags & B_NEEDCOMMIT)
1102191783Srmacklem						wouldcommit += bp->b_bcount;
1103191783Srmacklem				}
1104191783Srmacklem			}
1105191783Srmacklem			BO_UNLOCK(&vp->v_bufobj);
1106191783Srmacklem			/*
1107191783Srmacklem			 * Since we're not operating synchronously and
1108191783Srmacklem			 * bypassing the buffer cache, we are in a commit
1109191783Srmacklem			 * and holding all of these buffers whether
1110191783Srmacklem			 * transmitted or not.  If not limited, this
1111191783Srmacklem			 * will lead to the buffer cache deadlocking,
1112191783Srmacklem			 * as no one else can flush our uncommitted buffers.
1113191783Srmacklem			 */
1114191783Srmacklem			wouldcommit += uio->uio_resid;
1115191783Srmacklem			/*
1116191783Srmacklem			 * If we would initially exceed the maximum
1117191783Srmacklem			 * outstanding write commit size, flush and restart.
1118191783Srmacklem			 */
1119191783Srmacklem			if (wouldcommit > nmp->nm_wcommitsize)
1120191783Srmacklem				needrestart = 1;
1121191783Srmacklem		}
1122191783Srmacklem		if (needrestart)
1123191783Srmacklem			goto flush_and_restart;
1124191783Srmacklem	}
1125191783Srmacklem
1126191783Srmacklem	do {
1127191783Srmacklem		NFSINCRGLOBAL(newnfsstats.biocache_writes);
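		/*
		 * lbn is the logical block containing the write offset, on
		 * is the byte offset within that block and n is how many
		 * bytes of this block will be written on this pass.
		 */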
1128191783Srmacklem		lbn = uio->uio_offset / biosize;
1129191783Srmacklem		on = uio->uio_offset & (biosize-1);
1130191783Srmacklem		n = min((unsigned)(biosize - on), uio->uio_resid);
1131191783Srmacklemagain:
1132191783Srmacklem		/*
1133191783Srmacklem		 * Handle direct append and file extension cases, calculate
1134191783Srmacklem		 * unaligned buffer size.
1135191783Srmacklem		 */
1136191783Srmacklem		mtx_lock(&np->n_mtx);
1137191783Srmacklem		if (uio->uio_offset == np->n_size && n) {
1138191783Srmacklem			mtx_unlock(&np->n_mtx);
1139191783Srmacklem			/*
1140191783Srmacklem			 * Get the buffer (in its pre-append state to maintain
1141191783Srmacklem			 * B_CACHE if it was previously set).  Resize the
1142191783Srmacklem			 * nfsnode after we have locked the buffer to prevent
1143191783Srmacklem			 * readers from reading garbage.
1144191783Srmacklem			 */
1145191783Srmacklem			bcount = on;
1146191783Srmacklem			bp = nfs_getcacheblk(vp, lbn, bcount, td);
1147191783Srmacklem
1148191783Srmacklem			if (bp != NULL) {
1149191783Srmacklem				long save;
1150191783Srmacklem
1151191783Srmacklem				mtx_lock(&np->n_mtx);
1152191783Srmacklem				np->n_size = uio->uio_offset + n;
1153191783Srmacklem				np->n_flag |= NMODIFIED;
1154191783Srmacklem				vnode_pager_setsize(vp, np->n_size);
1155191783Srmacklem				mtx_unlock(&np->n_mtx);
1156191783Srmacklem
1157191783Srmacklem				save = bp->b_flags & B_CACHE;
1158191783Srmacklem				bcount += n;
1159191783Srmacklem				allocbuf(bp, bcount);
1160191783Srmacklem				bp->b_flags |= save;
1161191783Srmacklem			}
1162191783Srmacklem		} else {
1163191783Srmacklem			/*
1164191783Srmacklem			 * Obtain the locked cache block first, and then
1165191783Srmacklem			 * adjust the file's size as appropriate.
1166191783Srmacklem			 */
1167191783Srmacklem			bcount = on + n;
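			/*
			 * If the write ends before EOF, grow the buffer to
			 * cover the whole block (or up to EOF for the last
			 * block) so valid cached data beyond the write is
			 * kept.
			 */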
1168191783Srmacklem			if ((off_t)lbn * biosize + bcount < np->n_size) {
1169191783Srmacklem				if ((off_t)(lbn + 1) * biosize < np->n_size)
1170191783Srmacklem					bcount = biosize;
1171191783Srmacklem				else
1172191783Srmacklem					bcount = np->n_size - (off_t)lbn * biosize;
1173191783Srmacklem			}
1174191783Srmacklem			mtx_unlock(&np->n_mtx);
1175191783Srmacklem			bp = nfs_getcacheblk(vp, lbn, bcount, td);
1176191783Srmacklem			mtx_lock(&np->n_mtx);
1177191783Srmacklem			if (uio->uio_offset + n > np->n_size) {
1178191783Srmacklem				np->n_size = uio->uio_offset + n;
1179191783Srmacklem				np->n_flag |= NMODIFIED;
1180191783Srmacklem				vnode_pager_setsize(vp, np->n_size);
1181191783Srmacklem			}
1182191783Srmacklem			mtx_unlock(&np->n_mtx);
1183191783Srmacklem		}
1184191783Srmacklem
1185191783Srmacklem		if (!bp) {
1186191783Srmacklem			error = newnfs_sigintr(nmp, td);
1187191783Srmacklem			if (!error)
1188191783Srmacklem				error = EINTR;
1189191783Srmacklem			break;
1190191783Srmacklem		}
1191191783Srmacklem
1192191783Srmacklem		/*
1193191783Srmacklem		 * Issue a READ if B_CACHE is not set.  In special-append
1194191783Srmacklem		 * mode, B_CACHE is based on the buffer prior to the write
1195191783Srmacklem		 * op and is typically set, avoiding the read.  If a read
1196191783Srmacklem		 * is required in special append mode, the server will
1197191783Srmacklem		 * probably send us a short-read since we extended the file
1198191783Srmacklem		 * on our end, resulting in b_resid == 0 and, thus,
1199191783Srmacklem		 * B_CACHE getting set.
1200191783Srmacklem		 *
1201191783Srmacklem		 * We can also avoid issuing the read if the write covers
1202191783Srmacklem		 * the entire buffer.  We have to make sure the buffer state
1203191783Srmacklem		 * is reasonable in this case since we will not be initiating
1204191783Srmacklem		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
1205191783Srmacklem		 * more information.
1206191783Srmacklem		 *
1207191783Srmacklem		 * B_CACHE may also be set due to the buffer being cached
1208191783Srmacklem		 * normally.
1209191783Srmacklem		 */
1210191783Srmacklem
1211191783Srmacklem		if (on == 0 && n == bcount) {
1212191783Srmacklem			bp->b_flags |= B_CACHE;
1213191783Srmacklem			bp->b_flags &= ~B_INVAL;
1214191783Srmacklem			bp->b_ioflags &= ~BIO_ERROR;
1215191783Srmacklem		}
1216191783Srmacklem
1217191783Srmacklem		if ((bp->b_flags & B_CACHE) == 0) {
1218191783Srmacklem			bp->b_iocmd = BIO_READ;
1219191783Srmacklem			vfs_busy_pages(bp, 0);
1220191783Srmacklem			error = ncl_doio(vp, bp, cred, td);
1221191783Srmacklem			if (error) {
1222191783Srmacklem				brelse(bp);
1223191783Srmacklem				break;
1224191783Srmacklem			}
1225191783Srmacklem		}
1226191783Srmacklem		if (bp->b_wcred == NOCRED)
1227191783Srmacklem			bp->b_wcred = crhold(cred);
1228191783Srmacklem		mtx_lock(&np->n_mtx);
1229191783Srmacklem		np->n_flag |= NMODIFIED;
1230191783Srmacklem		mtx_unlock(&np->n_mtx);
1231191783Srmacklem
1232191783Srmacklem		/*
1233191783Srmacklem		 * If dirtyend exceeds file size, chop it down.  This should
1234191783Srmacklem		 * not normally occur, but there is an append race where it
1235191783Srmacklem		 * might occur (XXX), so we log it.
1236191783Srmacklem		 *
1237191783Srmacklem		 * If the chopping creates a reverse-indexed or degenerate
1238191783Srmacklem		 * situation with dirtyoff/end, we 0 both of them.
1239191783Srmacklem		 */
1240191783Srmacklem
1241191783Srmacklem		if (bp->b_dirtyend > bcount) {
1242191783Srmacklem			ncl_printf("NFS append race @%lx:%d\n",
1243191783Srmacklem			    (long)bp->b_blkno * DEV_BSIZE,
1244191783Srmacklem			    bp->b_dirtyend - bcount);
1245191783Srmacklem			bp->b_dirtyend = bcount;
1246191783Srmacklem		}
1247191783Srmacklem
1248191783Srmacklem		if (bp->b_dirtyoff >= bp->b_dirtyend)
1249191783Srmacklem			bp->b_dirtyoff = bp->b_dirtyend = 0;
1250191783Srmacklem
1251191783Srmacklem		/*
1252191783Srmacklem		 * If the new write will leave a contiguous dirty
1253191783Srmacklem		 * area, just update the b_dirtyoff and b_dirtyend,
1254191783Srmacklem		 * otherwise force a write rpc of the old dirty area.
1255191783Srmacklem		 *
1256191783Srmacklem		 * While it is possible to merge discontiguous writes due to
1257191783Srmacklem		 * our having a B_CACHE buffer (and thus valid read data
1258191783Srmacklem		 * for the hole), we don't because it could lead to
1259191783Srmacklem		 * significant cache coherency problems with multiple clients,
1260191783Srmacklem		 * especially if locking is implemented later on.
1261191783Srmacklem		 *
1262191783Srmacklem		 * As an optimization we could theoretically maintain
1263191783Srmacklem		 * a linked list of discontiguous areas, but we would still
1264191783Srmacklem		 * have to commit them separately, so there isn't much
1265191783Srmacklem		 * advantage to it except perhaps a bit of added asynchrony.
1266191783Srmacklem		 */
1267191783Srmacklem
1268191783Srmacklem		if (bp->b_dirtyend > 0 &&
1269191783Srmacklem		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
1270191783Srmacklem			if (bwrite(bp) == EINTR) {
1271191783Srmacklem				error = EINTR;
1272191783Srmacklem				break;
1273191783Srmacklem			}
1274191783Srmacklem			goto again;
1275191783Srmacklem		}
1276191783Srmacklem
1277191783Srmacklem		error = uiomove((char *)bp->b_data + on, n, uio);
1278191783Srmacklem
1279191783Srmacklem		/*
1280191783Srmacklem		 * Since this block is being modified, it must be written
1281191783Srmacklem		 * again and not just committed.  Since write clustering does
1282191783Srmacklem		 * not work for the stage 1 data write, only the stage 2
1283191783Srmacklem		 * commit rpc, we have to clear B_CLUSTEROK as well.
1284191783Srmacklem		 */
1285191783Srmacklem		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1286191783Srmacklem
1287191783Srmacklem		if (error) {
1288191783Srmacklem			bp->b_ioflags |= BIO_ERROR;
1289191783Srmacklem			brelse(bp);
1290191783Srmacklem			break;
1291191783Srmacklem		}
1292191783Srmacklem
1293191783Srmacklem		/*
1294191783Srmacklem		 * Only update dirtyoff/dirtyend if not a degenerate
1295191783Srmacklem		 * condition.
1296191783Srmacklem		 */
1297191783Srmacklem		if (n) {
1298191783Srmacklem			if (bp->b_dirtyend > 0) {
1299191783Srmacklem				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1300191783Srmacklem				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1301191783Srmacklem			} else {
1302191783Srmacklem				bp->b_dirtyoff = on;
1303191783Srmacklem				bp->b_dirtyend = on + n;
1304191783Srmacklem			}
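			/*
			 * Mark the bytes just copied into the buffer as
			 * valid so a later access to this range does not
			 * require reading it back from the server.
			 */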
1305193187Salc			vfs_bio_set_valid(bp, on, n);
1306191783Srmacklem		}
1307191783Srmacklem
1308191783Srmacklem		/*
1309191783Srmacklem		 * If IO_SYNC do bwrite().
1310191783Srmacklem		 *
1311191783Srmacklem		 * IO_INVAL appears to be unused; the idea seems to be
1312191783Srmacklem		 * to turn off caching in this case.  Very odd.  XXX
1313191783Srmacklem		 */
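		/*
		 * For the non-IO_SYNC cases below: a completely filled
		 * buffer cannot absorb any more data, so push it
		 * asynchronously right away; otherwise leave it as a
		 * delayed write in the hope that more data gets merged in.
		 */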
1314191783Srmacklem		if ((ioflag & IO_SYNC)) {
1315191783Srmacklem			if (ioflag & IO_INVAL)
1316191783Srmacklem				bp->b_flags |= B_NOCACHE;
1317191783Srmacklem			error = bwrite(bp);
1318191783Srmacklem			if (error)
1319191783Srmacklem				break;
1320191783Srmacklem		} else if ((n + on) == biosize) {
1321191783Srmacklem			bp->b_flags |= B_ASYNC;
1322191783Srmacklem			(void) ncl_writebp(bp, 0, NULL);
1323191783Srmacklem		} else {
1324191783Srmacklem			bdwrite(bp);
1325191783Srmacklem		}
1326191783Srmacklem	} while (uio->uio_resid > 0 && n > 0);
1327191783Srmacklem
1328191783Srmacklem	return (error);
1329191783Srmacklem}
1330191783Srmacklem
1331191783Srmacklem/*
1332191783Srmacklem * Get an nfs cache block.
1333191783Srmacklem *
1334191783Srmacklem * Allocate a new one if the block isn't currently in the cache
1335191783Srmacklem * and return the block marked busy. If the calling process is
1336191783Srmacklem * interrupted by a signal for an interruptible mount point, return
1337191783Srmacklem * NULL.
1338191783Srmacklem *
1339191783Srmacklem * The caller must carefully deal with the possible B_INVAL state of
1340191783Srmacklem * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1341191783Srmacklem * indirectly), so synchronous reads can be issued without worrying about
1342191783Srmacklem * the B_INVAL state.  We have to be a little more careful when dealing
1343191783Srmacklem * with writes (see comments in nfs_write()) when extending a file past
1344191783Srmacklem * its EOF.
1345191783Srmacklem */
1346191783Srmacklemstatic struct buf *
1347191783Srmacklemnfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1348191783Srmacklem{
1349191783Srmacklem	struct buf *bp;
1350191783Srmacklem	struct mount *mp;
1351191783Srmacklem	struct nfsmount *nmp;
1352191783Srmacklem
1353191783Srmacklem	mp = vp->v_mount;
1354191783Srmacklem	nmp = VFSTONFS(mp);
1355191783Srmacklem
1356191783Srmacklem	if (nmp->nm_flag & NFSMNT_INT) {
1357191783Srmacklem 		sigset_t oldset;
1358191783Srmacklem
1359191783Srmacklem 		ncl_set_sigmask(td, &oldset);
1360191783Srmacklem		bp = getblk(vp, bn, size, PCATCH, 0, 0);
1361191783Srmacklem 		ncl_restore_sigmask(td, &oldset);
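		/*
		 * getblk() can return NULL when its sleep is interrupted
		 * by a signal.  If the signal is fatal for this mount,
		 * give up; otherwise retry, waiting at most 2 seconds per
		 * attempt so pending signals are noticed promptly.
		 */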
1362191783Srmacklem		while (bp == NULL) {
1363191783Srmacklem			if (newnfs_sigintr(nmp, td))
1364191783Srmacklem				return (NULL);
1365191783Srmacklem			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1366191783Srmacklem		}
1367191783Srmacklem	} else {
1368191783Srmacklem		bp = getblk(vp, bn, size, 0, 0, 0);
1369191783Srmacklem	}
1370191783Srmacklem
1371191783Srmacklem	if (vp->v_type == VREG) {
1372191783Srmacklem		int biosize;
1373191783Srmacklem
1374191783Srmacklem		biosize = mp->mnt_stat.f_iosize;
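		/*
		 * Translate the logical block number (in biosize units)
		 * into DEV_BSIZE (512 byte) units for b_blkno; e.g. with
		 * a 32K biosize, logical block 3 becomes b_blkno 192.
		 */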
1375191783Srmacklem		bp->b_blkno = bn * (biosize / DEV_BSIZE);
1376191783Srmacklem	}
1377191783Srmacklem	return (bp);
1378191783Srmacklem}
1379191783Srmacklem
1380191783Srmacklem/*
1381191783Srmacklem * Flush and invalidate all dirty buffers. If another process is already
1382191783Srmacklem * doing the flush, just wait for completion.
1383191783Srmacklem */
1384191783Srmacklemint
1385191783Srmacklemncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1386191783Srmacklem{
1387191783Srmacklem	struct nfsnode *np = VTONFS(vp);
1388191783Srmacklem	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1389191783Srmacklem	int error = 0, slpflag, slptimeo;
1390191783Srmacklem 	int old_lock = 0;
1391191783Srmacklem
1392191783Srmacklem	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1393191783Srmacklem
1394191783Srmacklem	if ((nmp->nm_flag & NFSMNT_INT) == 0)
1395191783Srmacklem		intrflg = 0;
1396191783Srmacklem	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1397191783Srmacklem		intrflg = 1;
1398191783Srmacklem	if (intrflg) {
1399191783Srmacklem		slpflag = PCATCH;
1400191783Srmacklem		slptimeo = 2 * hz;
1401191783Srmacklem	} else {
1402191783Srmacklem		slpflag = 0;
1403191783Srmacklem		slptimeo = 0;
1404191783Srmacklem	}
1405191783Srmacklem
1406191783Srmacklem	old_lock = ncl_upgrade_vnlock(vp);
1407193955Srmacklem	if (vp->v_iflag & VI_DOOMED) {
1408193955Srmacklem		/*
1409193955Srmacklem		 * Since vgonel() uses the generic vinvalbuf() to flush
1410193955Srmacklem		 * dirty buffers and it does not call this function, it
1411193955Srmacklem		 * is safe to just return OK when VI_DOOMED is set.
1412193955Srmacklem		 */
1413193955Srmacklem		ncl_downgrade_vnlock(vp, old_lock);
1414193955Srmacklem		return (0);
1415193955Srmacklem	}
1416193955Srmacklem
1417191783Srmacklem	/*
1418191783Srmacklem	 * Now, flush as required.
1419191783Srmacklem	 */
1420191783Srmacklem	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1421191783Srmacklem		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1422191783Srmacklem		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1423191783Srmacklem		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1424191783Srmacklem		/*
1425191783Srmacklem		 * If the page clean was interrupted, fail the invalidation.
1426191783Srmacklem		 * Not doing so, we run the risk of losing dirty pages in the
1427191783Srmacklem		 * vinvalbuf() call below.
1428191783Srmacklem		 */
1429191783Srmacklem		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1430191783Srmacklem			goto out;
1431191783Srmacklem	}
1432191783Srmacklem
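	/*
	 * vinvalbuf() is first tried with the (possibly interruptible)
	 * slpflag.  If it fails and the interrupting signal is not fatal,
	 * retry without PCATCH but with a timeout until the flush succeeds.
	 */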
1433191783Srmacklem	error = vinvalbuf(vp, flags, slpflag, 0);
1434191783Srmacklem	while (error) {
1435191783Srmacklem		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1436191783Srmacklem			goto out;
1437191783Srmacklem		error = vinvalbuf(vp, flags, 0, slptimeo);
1438191783Srmacklem	}
1439191783Srmacklem	mtx_lock(&np->n_mtx);
1440191783Srmacklem	if (np->n_directio_asyncwr == 0)
1441191783Srmacklem		np->n_flag &= ~NMODIFIED;
1442191783Srmacklem	mtx_unlock(&np->n_mtx);
1443191783Srmacklemout:
1444191783Srmacklem	ncl_downgrade_vnlock(vp, old_lock);
1445191783Srmacklem	return (error);
1446191783Srmacklem}
1447191783Srmacklem
1448191783Srmacklem/*
1449191783Srmacklem * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1450191783Srmacklem * This is mainly to avoid queueing async I/O requests when the nfsiods
1451191783Srmacklem * are all hung on a dead server.
1452191783Srmacklem *
1453191783Srmacklem * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1454191783Srmacklem * is eventually dequeued by the async daemon, ncl_doio() *will*.
1455191783Srmacklem */
1456191783Srmacklemint
1457191783Srmacklemncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1458191783Srmacklem{
1459191783Srmacklem	int iod;
1460191783Srmacklem	int gotiod;
1461191783Srmacklem	int slpflag = 0;
1462191783Srmacklem	int slptimeo = 0;
1463191783Srmacklem	int error, error2;
1464191783Srmacklem
1465191783Srmacklem	/*
1466191783Srmacklem	 * Unless iothreadcnt is set > 0, don't bother with async I/O
1467191783Srmacklem	 * threads. For LAN environments, they don't buy any significant
1468191783Srmacklem	 * performance improvement that you can't get with large block
1469191783Srmacklem	 * sizes.
1470191783Srmacklem	 */
1471191783Srmacklem	if (nmp->nm_readahead == 0)
1472191783Srmacklem		return (EPERM);
1473191783Srmacklem
1474191783Srmacklem	/*
1475191783Srmacklem	 * Commits are usually short and sweet so let's save some CPU and
1476191783Srmacklem	 * leave the async daemons for more important rpc's (such as reads
1477191783Srmacklem	 * and writes).
1478191783Srmacklem	 */
1479191783Srmacklem	mtx_lock(&ncl_iod_mutex);
1480191783Srmacklem	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1481191783Srmacklem	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
1482191783Srmacklem		mtx_unlock(&ncl_iod_mutex);
1483191783Srmacklem		return(EIO);
1484191783Srmacklem	}
1485191783Srmacklemagain:
1486191783Srmacklem	if (nmp->nm_flag & NFSMNT_INT)
1487191783Srmacklem		slpflag = PCATCH;
1488191783Srmacklem	gotiod = FALSE;
1489191783Srmacklem
1490191783Srmacklem	/*
1491191783Srmacklem	 * Find a free iod to process this request.
1492191783Srmacklem	 */
1493191783Srmacklem	for (iod = 0; iod < ncl_numasync; iod++)
1494191783Srmacklem		if (ncl_iodwant[iod]) {
1495191783Srmacklem			gotiod = TRUE;
1496191783Srmacklem			break;
1497191783Srmacklem		}
1498191783Srmacklem
1499191783Srmacklem	/*
1500191783Srmacklem	 * Try to create one if none are free.
1501191783Srmacklem	 */
1502191783Srmacklem	if (!gotiod) {
1503191783Srmacklem		iod = ncl_nfsiodnew();
1504191783Srmacklem		if (iod != -1)
1505191783Srmacklem			gotiod = TRUE;
1506191783Srmacklem	}
1507191783Srmacklem
1508191783Srmacklem	if (gotiod) {
1509191783Srmacklem		/*
1510191783Srmacklem		 * Found one, so wake it up and tell it which
1511191783Srmacklem		 * mount to process.
1512191783Srmacklem		 */
1513191783Srmacklem		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1514191783Srmacklem		    iod, nmp));
1515191783Srmacklem		ncl_iodwant[iod] = NULL;
1516191783Srmacklem		ncl_iodmount[iod] = nmp;
1517191783Srmacklem		nmp->nm_bufqiods++;
1518191783Srmacklem		wakeup(&ncl_iodwant[iod]);
1519191783Srmacklem	}
1520191783Srmacklem
1521191783Srmacklem	/*
1522191783Srmacklem	 * If none are free, we may already have an iod working on this mount
1523191783Srmacklem	 * point.  If so, it will process our request.
1524191783Srmacklem	 */
1525191783Srmacklem	if (!gotiod) {
1526191783Srmacklem		if (nmp->nm_bufqiods > 0) {
1527191783Srmacklem			NFS_DPF(ASYNCIO,
1528191783Srmacklem				("ncl_asyncio: %d iods are already processing mount %p\n",
1529191783Srmacklem				 nmp->nm_bufqiods, nmp));
1530191783Srmacklem			gotiod = TRUE;
1531191783Srmacklem		}
1532191783Srmacklem	}
1533191783Srmacklem
1534191783Srmacklem	/*
1535191783Srmacklem	 * If we have an iod which can process the request, then queue
1536191783Srmacklem	 * the buffer.
1537191783Srmacklem	 */
1538191783Srmacklem	if (gotiod) {
1539191783Srmacklem		/*
1540191783Srmacklem		 * Ensure that the queue never grows too large.  We still want
1541191783Srmacklem		 * to asynchronize so we block rather than return EIO.
1542191783Srmacklem		 */
1543191783Srmacklem		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1544191783Srmacklem			NFS_DPF(ASYNCIO,
1545191783Srmacklem				("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1546191783Srmacklem			nmp->nm_bufqwant = TRUE;
1547191783Srmacklem 			error = ncl_msleep(td, &nmp->nm_bufq, &ncl_iod_mutex,
1548191783Srmacklem					   slpflag | PRIBIO,
1549191783Srmacklem 					   "nfsaio", slptimeo);
1550191783Srmacklem			if (error) {
1551191783Srmacklem				error2 = newnfs_sigintr(nmp, td);
1552191783Srmacklem				if (error2) {
1553191783Srmacklem					mtx_unlock(&ncl_iod_mutex);
1554191783Srmacklem					return (error2);
1555191783Srmacklem				}
1556191783Srmacklem				if (slpflag == PCATCH) {
1557191783Srmacklem					slpflag = 0;
1558191783Srmacklem					slptimeo = 2 * hz;
1559191783Srmacklem				}
1560191783Srmacklem			}
1561191783Srmacklem			/*
1562191783Srmacklem			 * We might have lost our iod while sleeping,
1563191783Srmacklem			 * so check and loop if necessary.
1564191783Srmacklem			 */
1565191783Srmacklem			if (nmp->nm_bufqiods == 0) {
1566191783Srmacklem				NFS_DPF(ASYNCIO,
1567191783Srmacklem					("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1568191783Srmacklem				goto again;
1569191783Srmacklem			}
1570191783Srmacklem		}
1571191783Srmacklem
1572191783Srmacklem		/* We might have lost our nfsiod */
1573191783Srmacklem		if (nmp->nm_bufqiods == 0) {
1574191783Srmacklem			NFS_DPF(ASYNCIO,
1575191783Srmacklem				("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1576191783Srmacklem			goto again;
1577191783Srmacklem		}
1578191783Srmacklem
1579191783Srmacklem		if (bp->b_iocmd == BIO_READ) {
1580191783Srmacklem			if (bp->b_rcred == NOCRED && cred != NOCRED)
1581191783Srmacklem				bp->b_rcred = crhold(cred);
1582191783Srmacklem		} else {
1583191783Srmacklem			if (bp->b_wcred == NOCRED && cred != NOCRED)
1584191783Srmacklem				bp->b_wcred = crhold(cred);
1585191783Srmacklem		}
1586191783Srmacklem
1587191783Srmacklem		if (bp->b_flags & B_REMFREE)
1588191783Srmacklem			bremfreef(bp);
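		/*
		 * The buffer will be released by an nfsiod rather than by
		 * this thread, so hand ownership of the buffer lock to the
		 * kernel before queueing it.
		 */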
1589191783Srmacklem		BUF_KERNPROC(bp);
1590191783Srmacklem		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1591191783Srmacklem		nmp->nm_bufqlen++;
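		/*
		 * For direct (uncached) async writes, count the outstanding
		 * write on the nfsnode so that fsync()/close() can wait for
		 * it to drain; ncl_doio_directwrite() drops the count.
		 */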
1592191783Srmacklem		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1593191783Srmacklem			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1594191783Srmacklem			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1595191783Srmacklem			VTONFS(bp->b_vp)->n_directio_asyncwr++;
1596191783Srmacklem			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1597191783Srmacklem		}
1598191783Srmacklem		mtx_unlock(&ncl_iod_mutex);
1599191783Srmacklem		return (0);
1600191783Srmacklem	}
1601191783Srmacklem
1602191783Srmacklem	mtx_unlock(&ncl_iod_mutex);
1603191783Srmacklem
1604191783Srmacklem	/*
1605191783Srmacklem	 * All the iods are busy on other mounts, so return EIO to
1606191783Srmacklem	 * force the caller to process the i/o synchronously.
1607191783Srmacklem	 */
1608191783Srmacklem	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1609191783Srmacklem	return (EIO);
1610191783Srmacklem}
1611191783Srmacklem
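/*
 * Complete a direct (uncached) write, typically from an nfsiod: perform
 * the FILESYNC write RPC described by the uio hung off b_caller1, free
 * the uio and iovec allocated by the direct write path, and drop the
 * nfsnode's count of outstanding direct writes, waking anyone waiting
 * for them to drain.
 */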
1612191783Srmacklemvoid
1613191783Srmacklemncl_doio_directwrite(struct buf *bp)
1614191783Srmacklem{
1615191783Srmacklem	int iomode, must_commit;
1616191783Srmacklem	struct uio *uiop = (struct uio *)bp->b_caller1;
1617191783Srmacklem	char *iov_base = uiop->uio_iov->iov_base;
1618191783Srmacklem
1619191783Srmacklem	iomode = NFSWRITE_FILESYNC;
1620191783Srmacklem	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1621191783Srmacklem	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit);
1622191783Srmacklem	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1623191783Srmacklem	free(iov_base, M_NFSDIRECTIO);
1624191783Srmacklem	free(uiop->uio_iov, M_NFSDIRECTIO);
1625191783Srmacklem	free(uiop, M_NFSDIRECTIO);
1626191783Srmacklem	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1627191783Srmacklem		struct nfsnode *np = VTONFS(bp->b_vp);
1628191783Srmacklem		mtx_lock(&np->n_mtx);
1629191783Srmacklem		np->n_directio_asyncwr--;
1630191783Srmacklem		if (np->n_directio_asyncwr == 0) {
1631191783Srmacklem			np->n_flag &= ~NMODIFIED;
1632191783Srmacklem			if ((np->n_flag & NFSYNCWAIT)) {
1633191783Srmacklem				np->n_flag &= ~NFSYNCWAIT;
1634191783Srmacklem				wakeup((caddr_t)&np->n_directio_asyncwr);
1635191783Srmacklem			}
1636191783Srmacklem		}
1637191783Srmacklem		mtx_unlock(&np->n_mtx);
1638191783Srmacklem	}
1639191783Srmacklem	bp->b_vp = NULL;
1640191783Srmacklem	relpbuf(bp, &ncl_pbuf_freecnt);
1641191783Srmacklem}
1642191783Srmacklem
1643191783Srmacklem/*
1644191783Srmacklem * Do an I/O operation to/from a cache block. This may be called
1645191783Srmacklem * synchronously or from an nfsiod.
1646191783Srmacklem */
1647191783Srmacklemint
1648191783Srmacklemncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td)
1649191783Srmacklem{
1650191783Srmacklem	struct uio *uiop;
1651191783Srmacklem	struct nfsnode *np;
1652191783Srmacklem	struct nfsmount *nmp;
1653191783Srmacklem	int error = 0, iomode, must_commit = 0;
1654191783Srmacklem	struct uio uio;
1655191783Srmacklem	struct iovec io;
1656191783Srmacklem	struct proc *p = td ? td->td_proc : NULL;
1657191783Srmacklem	uint8_t	iocmd;
1658191783Srmacklem
1659191783Srmacklem	np = VTONFS(vp);
1660191783Srmacklem	nmp = VFSTONFS(vp->v_mount);
1661191783Srmacklem	uiop = &uio;
1662191783Srmacklem	uiop->uio_iov = &io;
1663191783Srmacklem	uiop->uio_iovcnt = 1;
1664191783Srmacklem	uiop->uio_segflg = UIO_SYSSPACE;
1665191783Srmacklem	uiop->uio_td = td;
1666191783Srmacklem
1667191783Srmacklem	/*
1668191783Srmacklem	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
1669191783Srmacklem	 * do this here so we do not have to do it in all the code that
1670191783Srmacklem	 * calls us.
1671191783Srmacklem	 */
1672191783Srmacklem	bp->b_flags &= ~B_INVAL;
1673191783Srmacklem	bp->b_ioflags &= ~BIO_ERROR;
1674191783Srmacklem
1675191783Srmacklem	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1676191783Srmacklem	iocmd = bp->b_iocmd;
1677191783Srmacklem	if (iocmd == BIO_READ) {
1678191783Srmacklem	    io.iov_len = uiop->uio_resid = bp->b_bcount;
1679191783Srmacklem	    io.iov_base = bp->b_data;
1680191783Srmacklem	    uiop->uio_rw = UIO_READ;
1681191783Srmacklem
1682191783Srmacklem	    switch (vp->v_type) {
1683191783Srmacklem	    case VREG:
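		/*
		 * b_blkno is in DEV_BSIZE units, so the byte offset of
		 * the READ RPC is simply b_blkno * DEV_BSIZE.
		 */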
1684191783Srmacklem		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1685191783Srmacklem		NFSINCRGLOBAL(newnfsstats.read_bios);
1686191783Srmacklem		error = ncl_readrpc(vp, uiop, cr);
1687191783Srmacklem
1688191783Srmacklem		if (!error) {
1689191783Srmacklem		    if (uiop->uio_resid) {
1690191783Srmacklem			/*
1691191783Srmacklem			 * If we had a short read with no error, we must have
1692191783Srmacklem			 * hit a file hole.  We should zero-fill the remainder.
1693191783Srmacklem			 * This can also occur if the server hits the file EOF.
1694191783Srmacklem			 *
1695191783Srmacklem			 * Holes used to be able to occur due to pending
1696191783Srmacklem			 * writes, but that is not possible any longer.
1697191783Srmacklem			 */
1698191783Srmacklem			int nread = bp->b_bcount - uiop->uio_resid;
1699191783Srmacklem			int left  = uiop->uio_resid;
1700191783Srmacklem
1701191783Srmacklem			if (left > 0)
1702191783Srmacklem				bzero((char *)bp->b_data + nread, left);
1703191783Srmacklem			uiop->uio_resid = 0;
1704191783Srmacklem		    }
1705191783Srmacklem		}
1706191783Srmacklem		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
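		/*
		 * If this vnode backs a running executable and its
		 * modification time has changed on the server, the cached
		 * text pages may no longer match the file, so kill the
		 * process.
		 */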
1707191783Srmacklem		if (p && (vp->v_vflag & VV_TEXT)) {
1708191783Srmacklem			mtx_lock(&np->n_mtx);
1709191783Srmacklem			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1710191783Srmacklem				mtx_unlock(&np->n_mtx);
1711191783Srmacklem				PROC_LOCK(p);
1712191783Srmacklem				killproc(p, "text file modification");
1713191783Srmacklem				PROC_UNLOCK(p);
1714191783Srmacklem			} else
1715191783Srmacklem				mtx_unlock(&np->n_mtx);
1716191783Srmacklem		}
1717191783Srmacklem		break;
1718191783Srmacklem	    case VLNK:
1719191783Srmacklem		uiop->uio_offset = (off_t)0;
1720191783Srmacklem		NFSINCRGLOBAL(newnfsstats.readlink_bios);
1721191783Srmacklem		error = ncl_readlinkrpc(vp, uiop, cr);
1722191783Srmacklem		break;
1723191783Srmacklem	    case VDIR:
1724191783Srmacklem		NFSINCRGLOBAL(newnfsstats.readdir_bios);
1725191783Srmacklem		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
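		/*
		 * Prefer READDIRPLUS when the mount allows it; if the
		 * server does not support it, clear the flag and fall back
		 * to plain READDIR for this and all subsequent reads.
		 */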
1726191783Srmacklem		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1727191783Srmacklem			error = ncl_readdirplusrpc(vp, uiop, cr, td);
1728191783Srmacklem			if (error == NFSERR_NOTSUPP)
1729191783Srmacklem				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1730191783Srmacklem		}
1731191783Srmacklem		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1732191783Srmacklem			error = ncl_readdirrpc(vp, uiop, cr, td);
1733191783Srmacklem		/*
1734191783Srmacklem		 * end-of-directory sets B_INVAL but does not generate an
1735191783Srmacklem		 * error.
1736191783Srmacklem		 */
1737191783Srmacklem		if (error == 0 && uiop->uio_resid == bp->b_bcount)
1738191783Srmacklem			bp->b_flags |= B_INVAL;
1739191783Srmacklem		break;
1740191783Srmacklem	    default:
1741191783Srmacklem		ncl_printf("ncl_doio:  type %x unexpected\n", vp->v_type);
1742191783Srmacklem		break;
1743191783Srmacklem	    }
1744191783Srmacklem	    if (error) {
1745191783Srmacklem		bp->b_ioflags |= BIO_ERROR;
1746191783Srmacklem		bp->b_error = error;
1747191783Srmacklem	    }
1748191783Srmacklem	} else {
1749191783Srmacklem	    /*
1750191783Srmacklem	     * If we only need to commit, try to commit
1751191783Srmacklem	     */
1752191783Srmacklem	    if (bp->b_flags & B_NEEDCOMMIT) {
1753191783Srmacklem		    int retv;
1754191783Srmacklem		    off_t off;
1755191783Srmacklem
1756191783Srmacklem		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1757191783Srmacklem		    retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1758191783Srmacklem			bp->b_wcred, td);
1759191783Srmacklem		    if (retv == 0) {
1760191783Srmacklem			    bp->b_dirtyoff = bp->b_dirtyend = 0;
1761191783Srmacklem			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1762191783Srmacklem			    bp->b_resid = 0;
1763191783Srmacklem			    bufdone(bp);
1764191783Srmacklem			    return (0);
1765191783Srmacklem		    }
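		    /*
		     * A stale write verifier means the server has rebooted
		     * since the data was written, so all uncommitted
		     * buffers on this mount must be written again;
		     * ncl_clearcommit() clears their B_NEEDCOMMIT flags.
		     */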
1766191783Srmacklem		    if (retv == NFSERR_STALEWRITEVERF) {
1767191783Srmacklem			    ncl_clearcommit(vp->v_mount);
1768191783Srmacklem		    }
1769191783Srmacklem	    }
1770191783Srmacklem
1771191783Srmacklem	    /*
1772191783Srmacklem	     * Setup for actual write
1773191783Srmacklem	     */
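	    /*
	     * Clamp the dirty region so it does not extend past the
	     * current EOF; the file may have been truncated since the
	     * buffer was dirtied.
	     */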
1774191783Srmacklem	    mtx_lock(&np->n_mtx);
1775191783Srmacklem	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1776191783Srmacklem		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1777191783Srmacklem	    mtx_unlock(&np->n_mtx);
1778191783Srmacklem
1779191783Srmacklem	    if (bp->b_dirtyend > bp->b_dirtyoff) {
1780191783Srmacklem		io.iov_len = uiop->uio_resid = bp->b_dirtyend
1781191783Srmacklem		    - bp->b_dirtyoff;
1782191783Srmacklem		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1783191783Srmacklem		    + bp->b_dirtyoff;
1784191783Srmacklem		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1785191783Srmacklem		uiop->uio_rw = UIO_WRITE;
1786191783Srmacklem		NFSINCRGLOBAL(newnfsstats.write_bios);
1787191783Srmacklem
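		/*
		 * Only plain async writes of cacheable, non-clustered
		 * buffers may go out UNSTABLE and be committed later;
		 * everything else is written FILESYNC so the data is on
		 * stable storage when the RPC completes.
		 */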
1788191783Srmacklem		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1789191783Srmacklem		    iomode = NFSWRITE_UNSTABLE;
1790191783Srmacklem		else
1791191783Srmacklem		    iomode = NFSWRITE_FILESYNC;
1792191783Srmacklem
1793191783Srmacklem		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit);
1794191783Srmacklem
1795191783Srmacklem		/*
1796191783Srmacklem		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1797191783Srmacklem		 * to cluster the buffers needing commit.  This will allow
1798191783Srmacklem		 * the system to submit a single commit rpc for the whole
1799191783Srmacklem		 * cluster.  We can do this even if the buffer is not 100%
1800191783Srmacklem		 * dirty (relative to the NFS blocksize), so we optimize the
1801191783Srmacklem		 * append-to-file-case.
1802191783Srmacklem		 *
1803191783Srmacklem		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1804191783Srmacklem		 * cleared because write clustering only works for commit
1805191783Srmacklem		 * rpc's, not for the data portion of the write).
1806191783Srmacklem		 */
1807191783Srmacklem
1808191783Srmacklem		if (!error && iomode == NFSWRITE_UNSTABLE) {
1809191783Srmacklem		    bp->b_flags |= B_NEEDCOMMIT;
1810191783Srmacklem		    if (bp->b_dirtyoff == 0
1811191783Srmacklem			&& bp->b_dirtyend == bp->b_bcount)
1812191783Srmacklem			bp->b_flags |= B_CLUSTEROK;
1813191783Srmacklem		} else {
1814191783Srmacklem		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1815191783Srmacklem		}
1816191783Srmacklem
1817191783Srmacklem		/*
1818191783Srmacklem		 * For an interrupted write, the buffer is still valid
1819191783Srmacklem		 * and the write hasn't been pushed to the server yet,
1820191783Srmacklem		 * so we can't set BIO_ERROR and report the interruption
1821191783Srmacklem		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1822191783Srmacklem		 * is not relevant, so the rpc attempt is essentially
1823191783Srmacklem		 * a noop.  For the case of a V3 write rpc not being
1824191783Srmacklem		 * committed to stable storage, the block is still
1825191783Srmacklem		 * dirty and requires either a commit rpc or another
1826191783Srmacklem		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1827191783Srmacklem		 * the block is reused. This is indicated by setting
1828191783Srmacklem		 * the B_DELWRI and B_NEEDCOMMIT flags.
1829191783Srmacklem		 *
1830191783Srmacklem		 * If the buffer is marked B_PAGING, it does not reside on
1831191783Srmacklem		 * the vp's paging queues so we cannot call bdirty().  The
1832191783Srmacklem		 * bp in this case is not an NFS cache block so we should
1833191783Srmacklem		 * be safe. XXX
1834191783Srmacklem		 *
1835191783Srmacklem		 * The logic below breaks up errors into recoverable and
1836191783Srmacklem		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1837191783Srmacklem		 * and keep the buffer around for potential write retries.
1838191783Srmacklem		 * For the latter (e.g. ESTALE), we toss the buffer away (B_INVAL)
1839191783Srmacklem		 * and save the error in the nfsnode. This is less than ideal
1840191783Srmacklem		 * but necessary. Keeping such buffers around could potentially
1841191783Srmacklem		 * cause buffer exhaustion eventually (they can never be written
1842191783Srmacklem		 * out, so they will constantly be re-dirtied).  It also causes
1843191783Srmacklem		 * all sorts of vfs panics. For non-recoverable write errors,
1844191783Srmacklem		 * also invalidate the attrcache, so we'll be forced to go over
1845191783Srmacklem		 * the wire for this object, returning an error to the user on
1846191783Srmacklem		 * the next call (most of the time).
1847191783Srmacklem		 */
1848191783Srmacklem    		if (error == EINTR || error == EIO || error == ETIMEDOUT
1849191783Srmacklem		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1850191783Srmacklem			int s;
1851191783Srmacklem
1852191783Srmacklem			s = splbio();
1853191783Srmacklem			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1854191783Srmacklem			if ((bp->b_flags & B_PAGING) == 0) {
1855191783Srmacklem			    bdirty(bp);
1856191783Srmacklem			    bp->b_flags &= ~B_DONE;
1857191783Srmacklem			}
1858191783Srmacklem			if (error && (bp->b_flags & B_ASYNC) == 0)
1859191783Srmacklem			    bp->b_flags |= B_EINTR;
1860191783Srmacklem			splx(s);
1861191783Srmacklem	    	} else {
1862191783Srmacklem		    if (error) {
1863191783Srmacklem			bp->b_ioflags |= BIO_ERROR;
1864191783Srmacklem			bp->b_flags |= B_INVAL;
1865191783Srmacklem			bp->b_error = np->n_error = error;
1866191783Srmacklem			mtx_lock(&np->n_mtx);
1867191783Srmacklem			np->n_flag |= NWRITEERR;
1868191783Srmacklem			np->n_attrstamp = 0;
1869191783Srmacklem			mtx_unlock(&np->n_mtx);
1870191783Srmacklem		    }
1871191783Srmacklem		    bp->b_dirtyoff = bp->b_dirtyend = 0;
1872191783Srmacklem		}
1873191783Srmacklem	    } else {
1874191783Srmacklem		bp->b_resid = 0;
1875191783Srmacklem		bufdone(bp);
1876191783Srmacklem		return (0);
1877191783Srmacklem	    }
1878191783Srmacklem	}
1879191783Srmacklem	bp->b_resid = uiop->uio_resid;
1880191783Srmacklem	if (must_commit)
1881191783Srmacklem	    ncl_clearcommit(vp->v_mount);
1882191783Srmacklem	bufdone(bp);
1883191783Srmacklem	return (error);
1884191783Srmacklem}
1885191783Srmacklem
1886191783Srmacklem/*
1887191783Srmacklem * Used to aid in handling ftruncate() operations on the NFS client side.
1888191783Srmacklem * Truncation creates a number of special problems for NFS.  We have to
1889191783Srmacklem * throw away VM pages and buffer cache buffers that are beyond EOF, and
1890191783Srmacklem * we have to properly handle VM pages or (potentially dirty) buffers
1891191783Srmacklem * that straddle the truncation point.
1892191783Srmacklem */
1893191783Srmacklem
1894191783Srmacklemint
1895191783Srmacklemncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1896191783Srmacklem{
1897191783Srmacklem	struct nfsnode *np = VTONFS(vp);
1898191783Srmacklem	u_quad_t tsize;
1899191783Srmacklem	int biosize = vp->v_mount->mnt_stat.f_iosize;
1900191783Srmacklem	int error = 0;
1901191783Srmacklem
1902191783Srmacklem	mtx_lock(&np->n_mtx);
1903191783Srmacklem	tsize = np->n_size;
1904191783Srmacklem	np->n_size = nsize;
1905191783Srmacklem	mtx_unlock(&np->n_mtx);
1906191783Srmacklem
1907191783Srmacklem	if (nsize < tsize) {
1908191783Srmacklem		struct buf *bp;
1909191783Srmacklem		daddr_t lbn;
1910191783Srmacklem		int bufsize;
1911191783Srmacklem
1912191783Srmacklem		/*
1913191783Srmacklem		 * vtruncbuf() doesn't get the buffer overlapping the
1914191783Srmacklem		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
1915191783Srmacklem		 * buffer that now needs to be truncated.
1916191783Srmacklem		 */
1917191783Srmacklem		error = vtruncbuf(vp, cred, td, nsize, biosize);
1918191783Srmacklem		lbn = nsize / biosize;
1919191783Srmacklem		bufsize = nsize & (biosize - 1);
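		/*
		 * Get the buffer spanning the new EOF at its reduced size
		 * and clamp its dirty region; e.g. nsize 20000 with an 8K
		 * biosize gives lbn 2 and bufsize 3616.
		 */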
1920191783Srmacklem		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1921191783Srmacklem 		if (!bp)
1922191783Srmacklem 			return (EINTR);
1923191783Srmacklem		if (bp->b_dirtyoff > bp->b_bcount)
1924191783Srmacklem			bp->b_dirtyoff = bp->b_bcount;
1925191783Srmacklem		if (bp->b_dirtyend > bp->b_bcount)
1926191783Srmacklem			bp->b_dirtyend = bp->b_bcount;
1927191783Srmacklem		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
1928191783Srmacklem		brelse(bp);
1929191783Srmacklem	} else {
1930191783Srmacklem		vnode_pager_setsize(vp, nsize);
1931191783Srmacklem	}
1932191783Srmacklem	return (error);
1933191783Srmacklem}
1934191783Srmacklem
1935