/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sys/fs/nfsclient/nfs_clbio.c 239852 2012-08-29 15:58:44Z kib $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_LOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (0);
	}
	VM_OBJECT_UNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_LOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_UNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_LOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and the
			 * page is freed by vm_page_readahead_finish()
			 * if its index is not equal to the requested
			 * index, or is zeroed and marked valid by
			 * vm_pager_get_pages() for the requested page.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_UNLOCK(object);
	return (0);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on noncache-able vnode??\n");
		mtx_lock(&np->n_mtx);
	}

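	/*
	 * Assume failure for every page up front; on success,
	 * vnode_pager_undirty_pages() below overwrites the rtvals entries
	 * for the pages that were actually written out.
	 */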
	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

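	/*
	 * For the normal asynchronous case use an unstable write, letting
	 * the server acknowledge the data before it reaches stable storage;
	 * a later commit (or a FILESYNC write) makes it durable.  If the
	 * write verifier changed (must_commit), previously unstable-written
	 * buffers have to be written again, which ncl_clearcommit() below
	 * arranges for.
	 */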
	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
	    iomode = NFSWRITE_UNSTABLE;
	else
	    iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date.  If you find that you need
 * current attributes, this can be forced by setting n_attrstamp to 0
 * before the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching, no readaheads.  Just read data into the user buffer. */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
	    u_quad_t nsize;

	    mtx_lock(&np->n_mtx);
	    nsize = np->n_size;
	    mtx_unlock(&np->n_mtx);

	    switch (vp->v_type) {
	    case VREG:
		NFSINCRGLOBAL(newnfsstats.biocache_reads);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
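		/*
		 * For example (illustrative values only): with a biosize of
		 * 32768 and uio_offset 100000, lbn is 3 and on is 1696
		 * (100000 - 3 * 32768), the offset of the request within
		 * that cache block.
		 */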

		/*
		 * Start the read ahead(s), as required.
		 */
		if (nmp->nm_readahead > 0) {
		    for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
			(off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
			rabn = lbn + 1 + nra;
			if (incore(&vp->v_bufobj, rabn) == NULL) {
			    rabp = nfs_getcacheblk(vp, rabn, biosize, td);
			    if (!rabp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				    break;
				}
			    } else {
				brelse(rabp);
			    }
			}
		    }
		}

		/* Note that bcount is *not* DEV_BSIZE aligned. */
		bcount = biosize;
		if ((off_t)lbn * biosize >= nsize) {
			bcount = 0;
		} else if ((off_t)(lbn + 1) * biosize > nsize) {
			bcount = nsize - (off_t)lbn * biosize;
		}
		bp = nfs_getcacheblk(vp, lbn, bcount, td);

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */

		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			brelse(bp);
			return (error);
		    }
		}

		/*
		 * on is the offset into the current bp.  Figure out how many
		 * bytes we can copy out of the bp.  Note that bcount is
		 * NOT DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */

		n = 0;
		if (on < bcount)
			n = MIN((unsigned)(bcount - on), uio->uio_resid);
		break;
	    case VLNK:
		NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			return (error);
		    }
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		on = 0;
		break;
	    case VDIR:
		NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
		if (np->n_direofoffset
		    && uio->uio_offset >= np->n_direofoffset) {
		    return (0);
		}
		lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
		on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
		bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
		if (!bp) {
		    error = newnfs_sigintr(nmp, td);
		    return (error ? error : EINTR);
		}
		if ((bp->b_flags & B_CACHE) == 0) {
		    bp->b_iocmd = BIO_READ;
		    vfs_busy_pages(bp, 0);
		    error = ncl_doio(vp, bp, cred, td, 0);
		    if (error) {
			    brelse(bp);
		    }
		    while (error == NFSERR_BAD_COOKIE) {
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, 0, td, 1);
			/*
			 * Yuck! The directory has been modified on the
			 * server. The only way to get the block is by
			 * reading from the beginning to get all the
			 * offset cookies.
			 *
			 * Leave the last bp intact unless there is an error.
			 * Loop back up to the while if the error is another
			 * NFSERR_BAD_COOKIE (double yuck!).
			 */
			for (i = 0; i <= lbn && !error; i++) {
			    if (np->n_direofoffset
				&& (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
				    return (0);
			    bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
			    if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			    }
			    if ((bp->b_flags & B_CACHE) == 0) {
				    bp->b_iocmd = BIO_READ;
				    vfs_busy_pages(bp, 0);
				    error = ncl_doio(vp, bp, cred, td, 0);
				    /*
				     * no error + B_INVAL == directory EOF,
				     * use the block.
				     */
				    if (error == 0 && (bp->b_flags & B_INVAL))
					    break;
			    }
			    /*
			     * An error will throw away the block and the
			     * for loop will break out.  If no error and this
			     * is not the block we want, we throw away the
			     * block and go for the next one via the for loop.
			     */
			    if (error || i < lbn)
				    brelse(bp);
			}
		    }
		    /*
		     * The above while is repeated if we hit another cookie
		     * error.  If we hit an error and it wasn't a cookie error,
		     * we give up.
		     */
		    if (error)
			    return (error);
		}

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nmp->nm_readahead > 0 &&
		    (bp->b_flags & B_INVAL) == 0 &&
		    (np->n_direofoffset == 0 ||
		    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
		    incore(&vp->v_bufobj, lbn + 1) == NULL) {
			rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
			if (rabp) {
			    if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
				rabp->b_flags |= B_ASYNC;
				rabp->b_iocmd = BIO_READ;
				vfs_busy_pages(rabp, 0);
				if (ncl_asyncio(nmp, rabp, cred, td)) {
				    rabp->b_flags |= B_INVAL;
				    rabp->b_ioflags |= BIO_ERROR;
				    vfs_unbusy_pages(rabp);
				    brelse(rabp);
				}
			    } else {
				brelse(rabp);
			    }
			}
		}
		/*
		 * Unlike VREG files, whose buffer size (bp->b_bcount) is
		 * chopped for the EOF condition, we cannot tell how large
		 * NFS directories are going to be until we hit EOF.  So
		 * an NFS directory buffer is *not* chopped to its EOF.  Now,
		 * it just so happens that b_resid will effectively chop it
		 * to EOF.  *BUT* this information is lost if the buffer goes
		 * away and is reconstituted into a B_CACHE state (due to
		 * being VMIO) later.  So we keep track of the directory eof
		 * in np->n_direofoffset and chop it off as an extra step
		 * right here.
		 */
		n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
		if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
			n = np->n_direofoffset - uio->uio_offset;
		break;
	    default:
		ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
		bp = NULL;
		break;
	    }

	    if (n > 0) {
		    error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
	    }
	    if (vp->v_type == VLNK)
		n = 0;
	    if (bp != NULL)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed: the first is a copy from the
 * user buffer to a staging buffer, and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
				("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
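			/*
			 * If the copyin or the hand-off to the nfsiods
			 * failed, release the staging buffers.  Unless the
			 * failure was an interrupt, fall back and push the
			 * remaining data out synchronously via do_sync.
			 */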
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
					(char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int bp_cached, n, on, error = 0, error1;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		int needrestart = 0;
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize-1);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thus,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur, but there is an append race where it
		 * might (XXX), so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer (and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */

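		/*
		 * Illustration (values are only an example): with an
		 * existing dirty region [dirtyoff = 512, dirtyend = 1024),
		 * a new write at on = 1024 is contiguous and simply grows
		 * dirtyend, while a write at on = 2048 starts beyond the
		 * old dirtyend, so the old region is forced out with
		 * bwrite() before the merge is retried at "again".
		 */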
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what
			 * was possibly written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
1210239845Skib		 */
1211239845Skib		if (error != 0)
1212239845Skib			n = local_resid - uio->uio_resid;
1213191783Srmacklem
1214191783Srmacklem		/*
1215191783Srmacklem		 * Only update dirtyoff/dirtyend if not a degenerate
1216191783Srmacklem		 * condition.
1217191783Srmacklem		 */
1218239845Skib		if (n > 0) {
1219191783Srmacklem			if (bp->b_dirtyend > 0) {
1220191783Srmacklem				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
1221191783Srmacklem				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
1222191783Srmacklem			} else {
1223191783Srmacklem				bp->b_dirtyoff = on;
1224191783Srmacklem				bp->b_dirtyend = on + n;
1225191783Srmacklem			}
1226193187Salc			vfs_bio_set_valid(bp, on, n);
1227191783Srmacklem		}
1228191783Srmacklem
1229191783Srmacklem		/*
1230191783Srmacklem		 * If IO_SYNC do bwrite().
1231191783Srmacklem		 *
1232191783Srmacklem		 * IO_INVAL appears to be unused.  The idea appears to be
1233191783Srmacklem		 * to turn off caching in this case.  Very odd.  XXX
1234191783Srmacklem		 */
1235191783Srmacklem		if ((ioflag & IO_SYNC)) {
1236191783Srmacklem			if (ioflag & IO_INVAL)
1237191783Srmacklem				bp->b_flags |= B_NOCACHE;
1238239848Skib			error1 = bwrite(bp);
1239239848Skib			if (error1 != 0) {
1240239848Skib				if (error == 0)
1241239848Skib					error = error1;
1242191783Srmacklem				break;
1243239848Skib			}
1244191783Srmacklem		} else if ((n + on) == biosize) {
1245191783Srmacklem			bp->b_flags |= B_ASYNC;
1246191783Srmacklem			(void) ncl_writebp(bp, 0, NULL);
1247191783Srmacklem		} else {
1248191783Srmacklem			bdwrite(bp);
1249191783Srmacklem		}
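		/*
		 * Illustrative summary of the dispatch above: an IO_SYNC
		 * write (e.g. from an O_SYNC open) pushes each buffer with
		 * bwrite() before the loop advances, a write that fills the
		 * block out to the biosize boundary is started asynchronously
		 * via ncl_writebp(), and a partial block is left as a delayed
		 * write with bdwrite(), presumably so later writes can fill
		 * it before it goes out.
		 */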
1250239845Skib
1251239845Skib		if (error != 0)
1252239845Skib			break;
1253191783Srmacklem	} while (uio->uio_resid > 0 && n > 0);
1254191783Srmacklem
1255239845Skib	if (error != 0) {
1256239845Skib		if (ioflag & IO_UNIT) {
1257239845Skib			VATTR_NULL(&vattr);
1258239845Skib			vattr.va_size = orig_size;
1259239845Skib			/* IO_SYNC is handled implicitly */
1260239845Skib			(void)VOP_SETATTR(vp, &vattr, cred);
1261239845Skib			uio->uio_offset -= orig_resid - uio->uio_resid;
1262239845Skib			uio->uio_resid = orig_resid;
1263239845Skib		}
1264239845Skib	}
1265239845Skib
1266191783Srmacklem	return (error);
1267191783Srmacklem}
1268191783Srmacklem
1269191783Srmacklem/*
1270191783Srmacklem * Get an nfs cache block.
1271191783Srmacklem *
1272191783Srmacklem * Allocate a new one if the block isn't currently in the cache
1273191783Srmacklem * and return the block marked busy. If the calling process is
1274191783Srmacklem * interrupted by a signal for an interruptible mount point, return
1275191783Srmacklem * NULL.
1276191783Srmacklem *
1277191783Srmacklem * The caller must carefully deal with the possible B_INVAL state of
1278191783Srmacklem * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
1279191783Srmacklem * indirectly), so synchronous reads can be issued without worrying about
1280191783Srmacklem * the B_INVAL state.  We have to be a little more careful when dealing
1281191783Srmacklem * with writes (see comments in nfs_write()) when extending a file past
1282191783Srmacklem * its EOF.
1283191783Srmacklem */
1284191783Srmacklemstatic struct buf *
1285191783Srmacklemnfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
1286191783Srmacklem{
1287191783Srmacklem	struct buf *bp;
1288191783Srmacklem	struct mount *mp;
1289191783Srmacklem	struct nfsmount *nmp;
1290191783Srmacklem
1291191783Srmacklem	mp = vp->v_mount;
1292191783Srmacklem	nmp = VFSTONFS(mp);
1293191783Srmacklem
1294191783Srmacklem	if (nmp->nm_flag & NFSMNT_INT) {
1295191783Srmacklem 		sigset_t oldset;
1296191783Srmacklem
1297201029Srmacklem 		newnfs_set_sigmask(td, &oldset);
1298195821Srmacklem		bp = getblk(vp, bn, size, NFS_PCATCH, 0, 0);
1299201029Srmacklem 		newnfs_restore_sigmask(td, &oldset);
1300191783Srmacklem		while (bp == NULL) {
1301191783Srmacklem			if (newnfs_sigintr(nmp, td))
1302191783Srmacklem				return (NULL);
1303191783Srmacklem			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
1304191783Srmacklem		}
1305191783Srmacklem	} else {
1306191783Srmacklem		bp = getblk(vp, bn, size, 0, 0, 0);
1307191783Srmacklem	}
1308191783Srmacklem
1309231330Srmacklem	if (vp->v_type == VREG)
1310231330Srmacklem		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
1311191783Srmacklem	return (bp);
1312191783Srmacklem}
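/*
 * Illustrative example for the b_blkno mapping above (hypothetical
 * numbers): with bo_bsize = 32768 and DEV_BSIZE = 512, logical block
 * bn = 3 maps to b_blkno = 3 * (32768 / 512) = 192, i.e. the buffer's
 * position expressed in DEV_BSIZE units, which is what ncl_doio() later
 * uses when it computes the byte offset as b_blkno * DEV_BSIZE.
 */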
1313191783Srmacklem
1314191783Srmacklem/*
1315191783Srmacklem * Flush and invalidate all dirty buffers. If another process is already
1316191783Srmacklem * doing the flush, just wait for completion.
1317191783Srmacklem */
1318191783Srmacklemint
1319191783Srmacklemncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1320191783Srmacklem{
1321191783Srmacklem	struct nfsnode *np = VTONFS(vp);
1322191783Srmacklem	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1323191783Srmacklem	int error = 0, slpflag, slptimeo;
1324191783Srmacklem 	int old_lock = 0;
1325191783Srmacklem
1326191783Srmacklem	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1327191783Srmacklem
1328191783Srmacklem	if ((nmp->nm_flag & NFSMNT_INT) == 0)
1329191783Srmacklem		intrflg = 0;
1330191783Srmacklem	if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1331191783Srmacklem		intrflg = 1;
1332191783Srmacklem	if (intrflg) {
1333195821Srmacklem		slpflag = NFS_PCATCH;
1334191783Srmacklem		slptimeo = 2 * hz;
1335191783Srmacklem	} else {
1336191783Srmacklem		slpflag = 0;
1337191783Srmacklem		slptimeo = 0;
1338191783Srmacklem	}
1339191783Srmacklem
1340191783Srmacklem	old_lock = ncl_upgrade_vnlock(vp);
1341193955Srmacklem	if (vp->v_iflag & VI_DOOMED) {
1342193955Srmacklem		/*
1343193955Srmacklem		 * Since vgonel() uses the generic vinvalbuf() to flush
1344193955Srmacklem		 * dirty buffers and it does not call this function, it
1345193955Srmacklem		 * is safe to just return OK when VI_DOOMED is set.
1346193955Srmacklem		 */
1347193955Srmacklem		ncl_downgrade_vnlock(vp, old_lock);
1348193955Srmacklem		return (0);
1349193955Srmacklem	}
1350193955Srmacklem
1351191783Srmacklem	/*
1352191783Srmacklem	 * Now, flush as required.
1353191783Srmacklem	 */
1354191783Srmacklem	if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1355191783Srmacklem		VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
1356191783Srmacklem		vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1357191783Srmacklem		VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
1358191783Srmacklem		/*
1359191783Srmacklem		 * If the page clean was interrupted, fail the invalidation.
1360191783Srmacklem		 * Otherwise we run the risk of losing dirty pages in the
1361191783Srmacklem		 * vinvalbuf() call below.
1362191783Srmacklem		 */
1363191783Srmacklem		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1364191783Srmacklem			goto out;
1365191783Srmacklem	}
1366191783Srmacklem
1367191783Srmacklem	error = vinvalbuf(vp, flags, slpflag, 0);
1368191783Srmacklem	while (error) {
1369191783Srmacklem		if (intrflg && (error = newnfs_sigintr(nmp, td)))
1370191783Srmacklem			goto out;
1371191783Srmacklem		error = vinvalbuf(vp, flags, 0, slptimeo);
1372191783Srmacklem	}
1373191783Srmacklem	mtx_lock(&np->n_mtx);
1374191783Srmacklem	if (np->n_directio_asyncwr == 0)
1375191783Srmacklem		np->n_flag &= ~NMODIFIED;
1376191783Srmacklem	mtx_unlock(&np->n_mtx);
1377191783Srmacklemout:
1378191783Srmacklem	ncl_downgrade_vnlock(vp, old_lock);
1379191783Srmacklem	return (error);
1380191783Srmacklem}
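#if 0
/*
 * Usage sketch (hypothetical helper, simplified): callers typically flush
 * and invalidate with V_SAVE once the cached data can no longer be trusted,
 * e.g. after noticing that the file changed on the server.  Error handling
 * is reduced to the bare minimum here.
 */
static int
example_invalidate_after_change(struct vnode *vp, struct thread *td)
{

	/* Write back dirty buffers, then discard everything cached. */
	return (ncl_vinvalbuf(vp, V_SAVE, td, 1));
}
#endif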
1381191783Srmacklem
1382191783Srmacklem/*
1383191783Srmacklem * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1384191783Srmacklem * This is mainly to avoid queueing async I/O requests when the nfsiods
1385191783Srmacklem * are all hung on a dead server.
1386191783Srmacklem *
1387191783Srmacklem * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1388191783Srmacklem * is eventually dequeued by the async daemon, ncl_doio() *will*.
1389191783Srmacklem */
1390191783Srmacklemint
1391191783Srmacklemncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1392191783Srmacklem{
1393191783Srmacklem	int iod;
1394191783Srmacklem	int gotiod;
1395191783Srmacklem	int slpflag = 0;
1396191783Srmacklem	int slptimeo = 0;
1397191783Srmacklem	int error, error2;
1398191783Srmacklem
1399191783Srmacklem	/*
1400191783Srmacklem	 * Commits are usually short and sweet so let's save some CPU and
1401191783Srmacklem	 * leave the async daemons for more important rpc's (such as reads
1402191783Srmacklem	 * and writes).
1403191783Srmacklem	 */
1404191783Srmacklem	mtx_lock(&ncl_iod_mutex);
1405191783Srmacklem	if (bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1406191783Srmacklem	    (nmp->nm_bufqiods > ncl_numasync / 2)) {
1407191783Srmacklem		mtx_unlock(&ncl_iod_mutex);
1408191783Srmacklem		return (EIO);
1409191783Srmacklem	}
1410191783Srmacklemagain:
1411191783Srmacklem	if (nmp->nm_flag & NFSMNT_INT)
1412195821Srmacklem		slpflag = NFS_PCATCH;
1413191783Srmacklem	gotiod = FALSE;
1414191783Srmacklem
1415191783Srmacklem	/*
1416191783Srmacklem	 * Find a free iod to process this request.
1417191783Srmacklem	 */
1418191783Srmacklem	for (iod = 0; iod < ncl_numasync; iod++)
1419203119Srmacklem		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1420191783Srmacklem			gotiod = TRUE;
1421191783Srmacklem			break;
1422191783Srmacklem		}
1423191783Srmacklem
1424191783Srmacklem	/*
1425191783Srmacklem	 * Try to create one if none are free.
1426191783Srmacklem	 */
1427220683Srmacklem	if (!gotiod)
1428220683Srmacklem		ncl_nfsiodnew();
1429220683Srmacklem	else {
1430191783Srmacklem		/*
1431191783Srmacklem		 * Found one, so wake it up and tell it which
1432191783Srmacklem		 * mount to process.
1433191783Srmacklem		 */
1434191783Srmacklem		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1435191783Srmacklem		    iod, nmp));
1436203119Srmacklem		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1437191783Srmacklem		ncl_iodmount[iod] = nmp;
1438191783Srmacklem		nmp->nm_bufqiods++;
1439191783Srmacklem		wakeup(&ncl_iodwant[iod]);
1440191783Srmacklem	}
1441191783Srmacklem
1442191783Srmacklem	/*
1443191783Srmacklem	 * If none are free, we may already have an iod working on this mount
1444191783Srmacklem	 * point.  If so, it will process our request.
1445191783Srmacklem	 */
1446191783Srmacklem	if (!gotiod) {
1447191783Srmacklem		if (nmp->nm_bufqiods > 0) {
1448191783Srmacklem			NFS_DPF(ASYNCIO,
1449191783Srmacklem				("ncl_asyncio: %d iods are already processing mount %p\n",
1450191783Srmacklem				 nmp->nm_bufqiods, nmp));
1451191783Srmacklem			gotiod = TRUE;
1452191783Srmacklem		}
1453191783Srmacklem	}
1454191783Srmacklem
1455191783Srmacklem	/*
1456191783Srmacklem	 * If we have an iod which can process the request, then queue
1457191783Srmacklem	 * the buffer.
1458191783Srmacklem	 */
1459191783Srmacklem	if (gotiod) {
1460191783Srmacklem		/*
1461191783Srmacklem		 * Ensure that the queue never grows too large.  We still want
1462191783Srmacklem		 * to asynchronize so we block rather than return EIO.
1463191783Srmacklem		 */
1464191783Srmacklem		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1465191783Srmacklem			NFS_DPF(ASYNCIO,
1466191783Srmacklem				("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1467191783Srmacklem			nmp->nm_bufqwant = TRUE;
1468201029Srmacklem			error = newnfs_msleep(td, &nmp->nm_bufq,
1469201029Srmacklem			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1470201029Srmacklem			    slptimeo);
1471191783Srmacklem			if (error) {
1472191783Srmacklem				error2 = newnfs_sigintr(nmp, td);
1473191783Srmacklem				if (error2) {
1474191783Srmacklem					mtx_unlock(&ncl_iod_mutex);
1475191783Srmacklem					return (error2);
1476191783Srmacklem				}
1477195821Srmacklem				if (slpflag == NFS_PCATCH) {
1478191783Srmacklem					slpflag = 0;
1479191783Srmacklem					slptimeo = 2 * hz;
1480191783Srmacklem				}
1481191783Srmacklem			}
1482191783Srmacklem			/*
1483191783Srmacklem			 * We might have lost our iod while sleeping,
1484191783Srmacklem			 * so check and loop if necessary.
1485191783Srmacklem			 */
1486220683Srmacklem			goto again;
1487191783Srmacklem		}
1488191783Srmacklem
1489191783Srmacklem		/* We might have lost our nfsiod */
1490191783Srmacklem		if (nmp->nm_bufqiods == 0) {
1491191783Srmacklem			NFS_DPF(ASYNCIO,
1492191783Srmacklem				("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1493191783Srmacklem			goto again;
1494191783Srmacklem		}
1495191783Srmacklem
1496191783Srmacklem		if (bp->b_iocmd == BIO_READ) {
1497191783Srmacklem			if (bp->b_rcred == NOCRED && cred != NOCRED)
1498191783Srmacklem				bp->b_rcred = crhold(cred);
1499191783Srmacklem		} else {
1500191783Srmacklem			if (bp->b_wcred == NOCRED && cred != NOCRED)
1501191783Srmacklem				bp->b_wcred = crhold(cred);
1502191783Srmacklem		}
1503191783Srmacklem
1504191783Srmacklem		if (bp->b_flags & B_REMFREE)
1505191783Srmacklem			bremfreef(bp);
1506191783Srmacklem		BUF_KERNPROC(bp);
1507191783Srmacklem		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1508191783Srmacklem		nmp->nm_bufqlen++;
1509191783Srmacklem		if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1510191783Srmacklem			mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1511191783Srmacklem			VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1512191783Srmacklem			VTONFS(bp->b_vp)->n_directio_asyncwr++;
1513191783Srmacklem			mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1514191783Srmacklem		}
1515191783Srmacklem		mtx_unlock(&ncl_iod_mutex);
1516191783Srmacklem		return (0);
1517191783Srmacklem	}
1518191783Srmacklem
1519191783Srmacklem	mtx_unlock(&ncl_iod_mutex);
1520191783Srmacklem
1521191783Srmacklem	/*
1522191783Srmacklem	 * All the iods are busy on other mounts, so return EIO to
1523191783Srmacklem	 * force the caller to process the i/o synchronously.
1524191783Srmacklem	 */
1525191783Srmacklem	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1526191783Srmacklem	return (EIO);
1527191783Srmacklem}
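#if 0
/*
 * Usage sketch (hypothetical helper, simplified): the EIO return above is
 * used by the buffer strategy path to fall back to synchronous I/O when no
 * nfsiod can take the request, roughly as follows.
 */
static void
example_issue_bio(struct vnode *vp, struct buf *bp, struct ucred *cr,
    struct thread *td)
{

	/* Queue for an nfsiod if async; otherwise perform the I/O here. */
	if ((bp->b_flags & B_ASYNC) == 0 ||
	    ncl_asyncio(VFSTONFS(vp->v_mount), bp, NOCRED, td) != 0)
		(void)ncl_doio(vp, bp, cr, td, 1);
}
#endif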
1528191783Srmacklem
1529191783Srmacklemvoid
1530191783Srmacklemncl_doio_directwrite(struct buf *bp)
1531191783Srmacklem{
1532191783Srmacklem	int iomode, must_commit;
1533191783Srmacklem	struct uio *uiop = (struct uio *)bp->b_caller1;
1534191783Srmacklem	char *iov_base = uiop->uio_iov->iov_base;
1535191783Srmacklem
1536191783Srmacklem	iomode = NFSWRITE_FILESYNC;
1537191783Srmacklem	uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1538207082Srmacklem	ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1539191783Srmacklem	KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1540191783Srmacklem	free(iov_base, M_NFSDIRECTIO);
1541191783Srmacklem	free(uiop->uio_iov, M_NFSDIRECTIO);
1542191783Srmacklem	free(uiop, M_NFSDIRECTIO);
1543191783Srmacklem	if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1544191783Srmacklem		struct nfsnode *np = VTONFS(bp->b_vp);
1545191783Srmacklem		mtx_lock(&np->n_mtx);
1546191783Srmacklem		np->n_directio_asyncwr--;
1547191783Srmacklem		if (np->n_directio_asyncwr == 0) {
1548191783Srmacklem			np->n_flag &= ~NMODIFIED;
1549191783Srmacklem			if ((np->n_flag & NFSYNCWAIT)) {
1550191783Srmacklem				np->n_flag &= ~NFSYNCWAIT;
1551191783Srmacklem				wakeup((caddr_t)&np->n_directio_asyncwr);
1552191783Srmacklem			}
1553191783Srmacklem		}
1554191783Srmacklem		mtx_unlock(&np->n_mtx);
1555191783Srmacklem	}
1556191783Srmacklem	bp->b_vp = NULL;
1557191783Srmacklem	relpbuf(bp, &ncl_pbuf_freecnt);
1558191783Srmacklem}
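/*
 * Context for the function above: the uio hung off bp->b_caller1 is
 * expected to have been built by the direct-I/O write path, with the data
 * copy (iov_base), the iovec and the uio itself allocated from
 * M_NFSDIRECTIO, since those are exactly the allocations released here
 * once the FILESYNC write RPC completes on behalf of the nfsiod.
 */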
1559191783Srmacklem
1560191783Srmacklem/*
1561191783Srmacklem * Do an I/O operation to/from a cache block. This may be called
1562191783Srmacklem * synchronously or from an nfsiod.
1563191783Srmacklem */
1564191783Srmacklemint
1565207082Srmacklemncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1566207082Srmacklem    int called_from_strategy)
1567191783Srmacklem{
1568191783Srmacklem	struct uio *uiop;
1569191783Srmacklem	struct nfsnode *np;
1570191783Srmacklem	struct nfsmount *nmp;
1571191783Srmacklem	int error = 0, iomode, must_commit = 0;
1572191783Srmacklem	struct uio uio;
1573191783Srmacklem	struct iovec io;
1574191783Srmacklem	struct proc *p = td ? td->td_proc : NULL;
1575191783Srmacklem	uint8_t	iocmd;
1576191783Srmacklem
1577191783Srmacklem	np = VTONFS(vp);
1578191783Srmacklem	nmp = VFSTONFS(vp->v_mount);
1579191783Srmacklem	uiop = &uio;
1580191783Srmacklem	uiop->uio_iov = &io;
1581191783Srmacklem	uiop->uio_iovcnt = 1;
1582191783Srmacklem	uiop->uio_segflg = UIO_SYSSPACE;
1583191783Srmacklem	uiop->uio_td = td;
1584191783Srmacklem
1585191783Srmacklem	/*
1586191783Srmacklem	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
1587191783Srmacklem	 * do this here so we do not have to do it in all the code that
1588191783Srmacklem	 * calls us.
1589191783Srmacklem	 */
1590191783Srmacklem	bp->b_flags &= ~B_INVAL;
1591191783Srmacklem	bp->b_ioflags &= ~BIO_ERROR;
1592191783Srmacklem
1593191783Srmacklem	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1594191783Srmacklem	iocmd = bp->b_iocmd;
1595191783Srmacklem	if (iocmd == BIO_READ) {
1596191783Srmacklem	    io.iov_len = uiop->uio_resid = bp->b_bcount;
1597191783Srmacklem	    io.iov_base = bp->b_data;
1598191783Srmacklem	    uiop->uio_rw = UIO_READ;
1599191783Srmacklem
1600191783Srmacklem	    switch (vp->v_type) {
1601191783Srmacklem	    case VREG:
1602191783Srmacklem		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1603191783Srmacklem		NFSINCRGLOBAL(newnfsstats.read_bios);
1604191783Srmacklem		error = ncl_readrpc(vp, uiop, cr);
1605191783Srmacklem
1606191783Srmacklem		if (!error) {
1607191783Srmacklem		    if (uiop->uio_resid) {
1608191783Srmacklem			/*
1609191783Srmacklem			 * If we had a short read with no error, we must have
1610191783Srmacklem			 * hit a file hole.  We should zero-fill the remainder.
1611191783Srmacklem			 * This can also occur if the server hits the file EOF.
1612191783Srmacklem			 *
1613191783Srmacklem			 * Holes used to be able to occur due to pending
1614191783Srmacklem			 * writes, but that is not possible any longer.
1615191783Srmacklem			 */
1616191783Srmacklem			int nread = bp->b_bcount - uiop->uio_resid;
1617233353Skib			ssize_t left = uiop->uio_resid;
1618191783Srmacklem
1619191783Srmacklem			if (left > 0)
1620191783Srmacklem				bzero((char *)bp->b_data + nread, left);
1621191783Srmacklem			uiop->uio_resid = 0;
1622191783Srmacklem		    }
1623191783Srmacklem		}
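		/*
		 * Worked example (hypothetical numbers): for b_bcount = 32768
		 * and a server reply of only 10000 bytes, nread = 10000 and
		 * left = 22768, so bytes [10000, 32768) of the buffer are
		 * zeroed and the buffer is then treated as fully valid.
		 */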
1624191783Srmacklem		/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1625191783Srmacklem		if (p && (vp->v_vflag & VV_TEXT)) {
1626191783Srmacklem			mtx_lock(&np->n_mtx);
1627191783Srmacklem			if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1628191783Srmacklem				mtx_unlock(&np->n_mtx);
1629191783Srmacklem				PROC_LOCK(p);
1630191783Srmacklem				killproc(p, "text file modification");
1631191783Srmacklem				PROC_UNLOCK(p);
1632191783Srmacklem			} else
1633191783Srmacklem				mtx_unlock(&np->n_mtx);
1634191783Srmacklem		}
1635191783Srmacklem		break;
1636191783Srmacklem	    case VLNK:
1637191783Srmacklem		uiop->uio_offset = (off_t)0;
1638191783Srmacklem		NFSINCRGLOBAL(newnfsstats.readlink_bios);
1639191783Srmacklem		error = ncl_readlinkrpc(vp, uiop, cr);
1640191783Srmacklem		break;
1641191783Srmacklem	    case VDIR:
1642191783Srmacklem		NFSINCRGLOBAL(newnfsstats.readdir_bios);
1643191783Srmacklem		uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1644191783Srmacklem		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1645191783Srmacklem			error = ncl_readdirplusrpc(vp, uiop, cr, td);
1646191783Srmacklem			if (error == NFSERR_NOTSUPP)
1647191783Srmacklem				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1648191783Srmacklem		}
1649191783Srmacklem		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1650191783Srmacklem			error = ncl_readdirrpc(vp, uiop, cr, td);
1651191783Srmacklem		/*
1652191783Srmacklem		 * end-of-directory sets B_INVAL but does not generate an
1653191783Srmacklem		 * error.
1654191783Srmacklem		 */
1655191783Srmacklem		if (error == 0 && uiop->uio_resid == bp->b_bcount)
1656191783Srmacklem			bp->b_flags |= B_INVAL;
1657191783Srmacklem		break;
1658191783Srmacklem	    default:
1659191783Srmacklem		ncl_printf("ncl_doio:  type %x unexpected\n", vp->v_type);
1660191783Srmacklem		break;
1661191783Srmacklem	    }
1662191783Srmacklem	    if (error) {
1663191783Srmacklem		bp->b_ioflags |= BIO_ERROR;
1664191783Srmacklem		bp->b_error = error;
1665191783Srmacklem	    }
1666191783Srmacklem	} else {
1667191783Srmacklem	    /*
1668191783Srmacklem	     * If we only need to commit, try to commit
1669191783Srmacklem	     */
1670191783Srmacklem	    if (bp->b_flags & B_NEEDCOMMIT) {
1671191783Srmacklem		    int retv;
1672191783Srmacklem		    off_t off;
1673191783Srmacklem
1674191783Srmacklem		    off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1675191783Srmacklem		    retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1676191783Srmacklem			bp->b_wcred, td);
1677191783Srmacklem		    if (retv == 0) {
1678191783Srmacklem			    bp->b_dirtyoff = bp->b_dirtyend = 0;
1679191783Srmacklem			    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1680191783Srmacklem			    bp->b_resid = 0;
1681191783Srmacklem			    bufdone(bp);
1682191783Srmacklem			    return (0);
1683191783Srmacklem		    }
1684191783Srmacklem		    if (retv == NFSERR_STALEWRITEVERF) {
1685191783Srmacklem			    ncl_clearcommit(vp->v_mount);
1686191783Srmacklem		    }
1687191783Srmacklem	    }
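	    /*
	     * Worked example (hypothetical numbers): a buffer with
	     * b_blkno = 64 (DEV_BSIZE units), b_dirtyoff = 0 and
	     * b_dirtyend = 8192 issues a commit covering bytes
	     * [64 * 512, 64 * 512 + 8192) = [32768, 40960) of the file.
	     */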
1688191783Srmacklem
1689191783Srmacklem	    /*
1690191783Srmacklem	     * Setup for actual write
1691191783Srmacklem	     */
1692191783Srmacklem	    mtx_lock(&np->n_mtx);
1693191783Srmacklem	    if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1694191783Srmacklem		bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1695191783Srmacklem	    mtx_unlock(&np->n_mtx);
1696191783Srmacklem
1697191783Srmacklem	    if (bp->b_dirtyend > bp->b_dirtyoff) {
1698191783Srmacklem		io.iov_len = uiop->uio_resid = bp->b_dirtyend
1699191783Srmacklem		    - bp->b_dirtyoff;
1700191783Srmacklem		uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1701191783Srmacklem		    + bp->b_dirtyoff;
1702191783Srmacklem		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1703191783Srmacklem		uiop->uio_rw = UIO_WRITE;
1704191783Srmacklem		NFSINCRGLOBAL(newnfsstats.write_bios);
1705191783Srmacklem
1706191783Srmacklem		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1707191783Srmacklem		    iomode = NFSWRITE_UNSTABLE;
1708191783Srmacklem		else
1709191783Srmacklem		    iomode = NFSWRITE_FILESYNC;
1710191783Srmacklem
1711207082Srmacklem		error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1712207082Srmacklem		    called_from_strategy);
1713191783Srmacklem
1714191783Srmacklem		/*
1715191783Srmacklem		 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1716191783Srmacklem		 * to cluster the buffers needing commit.  This will allow
1717191783Srmacklem		 * the system to submit a single commit rpc for the whole
1718191783Srmacklem		 * cluster.  We can do this even if the buffer is not 100%
1719191783Srmacklem		 * dirty (relative to the NFS blocksize), so we optimize the
1720191783Srmacklem		 * append-to-file-case.
1721191783Srmacklem		 *
1722191783Srmacklem		 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1723191783Srmacklem		 * cleared because write clustering only works for commit
1724191783Srmacklem		 * rpc's, not for the data portion of the write).
1725191783Srmacklem		 */
1726191783Srmacklem
1727191783Srmacklem		if (!error && iomode == NFSWRITE_UNSTABLE) {
1728191783Srmacklem		    bp->b_flags |= B_NEEDCOMMIT;
1729191783Srmacklem		    if (bp->b_dirtyoff == 0
1730191783Srmacklem			&& bp->b_dirtyend == bp->b_bcount)
1731191783Srmacklem			bp->b_flags |= B_CLUSTEROK;
1732191783Srmacklem		} else {
1733191783Srmacklem		    bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1734191783Srmacklem		}
1735191783Srmacklem
1736191783Srmacklem		/*
1737191783Srmacklem		 * For an interrupted write, the buffer is still valid
1738191783Srmacklem		 * and the write hasn't been pushed to the server yet,
1739191783Srmacklem		 * so we can't set BIO_ERROR and report the interruption
1740191783Srmacklem		 * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1741191783Srmacklem		 * is not relevant, so the rpc attempt is essentially
1742191783Srmacklem		 * a noop.  For the case of a V3 write rpc not being
1743191783Srmacklem		 * committed to stable storage, the block is still
1744191783Srmacklem		 * dirty and requires either a commit rpc or another
1745191783Srmacklem		 * write rpc with iomode == NFSV3WRITE_FILESYNC before
1746191783Srmacklem		 * the block is reused. This is indicated by setting
1747191783Srmacklem		 * the B_DELWRI and B_NEEDCOMMIT flags.
1748191783Srmacklem		 *
1749207082Srmacklem		 * EIO is returned by ncl_writerpc() to indicate a recoverable
1750207082Srmacklem		 * write error and is handled as above, except that
1751207082Srmacklem		 * B_EINTR isn't set. One cause of this is a stale stateid
1752207082Srmacklem		 * error for the RPC that indicates recovery is required,
1753207082Srmacklem		 * when called with called_from_strategy != 0.
1754207082Srmacklem		 *
1755191783Srmacklem		 * If the buffer is marked B_PAGING, it does not reside on
1756191783Srmacklem		 * the vp's paging queues so we cannot call bdirty().  The
1757191783Srmacklem		 * bp in this case is not an NFS cache block so we should
1758191783Srmacklem		 * be safe. XXX
1759191783Srmacklem		 *
1760191783Srmacklem		 * The logic below breaks up errors into recoverable and
1761191783Srmacklem		 * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1762191783Srmacklem		 * and keep the buffer around for potential write retries.
1763191783Srmacklem		 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1764191783Srmacklem		 * and save the error in the nfsnode. This is less than ideal
1765191783Srmacklem		 * but necessary. Keeping such buffers around could potentially
1766191783Srmacklem		 * cause buffer exhaustion eventually (they can never be written
1767191783Srmacklem		 * out, so they will constantly be re-dirtied). It also causes
1768191783Srmacklem		 * all sorts of vfs panics. For non-recoverable write errors,
1769191783Srmacklem		 * also invalidate the attrcache, so we'll be forced to go over
1770191783Srmacklem		 * the wire for this object, returning an error to the user on
1771191783Srmacklem		 * the next call (most of the time).
1772191783Srmacklem		 */
1773191783Srmacklem    		if (error == EINTR || error == EIO || error == ETIMEDOUT
1774191783Srmacklem		    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1775191783Srmacklem			int s;
1776191783Srmacklem
1777191783Srmacklem			s = splbio();
1778191783Srmacklem			bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1779191783Srmacklem			if ((bp->b_flags & B_PAGING) == 0) {
1780191783Srmacklem			    bdirty(bp);
1781191783Srmacklem			    bp->b_flags &= ~B_DONE;
1782191783Srmacklem			}
1783207082Srmacklem			if ((error == EINTR || error == ETIMEDOUT) &&
1784207082Srmacklem			    (bp->b_flags & B_ASYNC) == 0)
1785191783Srmacklem			    bp->b_flags |= B_EINTR;
1786191783Srmacklem			splx(s);
1787191783Srmacklem	    	} else {
1788191783Srmacklem		    if (error) {
1789191783Srmacklem			bp->b_ioflags |= BIO_ERROR;
1790191783Srmacklem			bp->b_flags |= B_INVAL;
1791191783Srmacklem			bp->b_error = np->n_error = error;
1792191783Srmacklem			mtx_lock(&np->n_mtx);
1793191783Srmacklem			np->n_flag |= NWRITEERR;
1794191783Srmacklem			np->n_attrstamp = 0;
1795223280Srmacklem			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1796191783Srmacklem			mtx_unlock(&np->n_mtx);
1797191783Srmacklem		    }
1798191783Srmacklem		    bp->b_dirtyoff = bp->b_dirtyend = 0;
1799191783Srmacklem		}
1800191783Srmacklem	    } else {
1801191783Srmacklem		bp->b_resid = 0;
1802191783Srmacklem		bufdone(bp);
1803191783Srmacklem		return (0);
1804191783Srmacklem	    }
1805191783Srmacklem	}
1806191783Srmacklem	bp->b_resid = uiop->uio_resid;
1807191783Srmacklem	if (must_commit)
1808191783Srmacklem	    ncl_clearcommit(vp->v_mount);
1809191783Srmacklem	bufdone(bp);
1810191783Srmacklem	return (error);
1811191783Srmacklem}
1812191783Srmacklem
1813191783Srmacklem/*
1814191783Srmacklem * Used to aid in handling ftruncate() operations on the NFS client side.
1815191783Srmacklem * Truncation creates a number of special problems for NFS.  We have to
1816191783Srmacklem * throw away VM pages and buffer cache buffers that are beyond EOF, and
1817191783Srmacklem * we have to properly handle VM pages or (potentially dirty) buffers
1818191783Srmacklem * that straddle the truncation point.
1819191783Srmacklem */
1820191783Srmacklem
1821191783Srmacklemint
1822191783Srmacklemncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1823191783Srmacklem{
1824191783Srmacklem	struct nfsnode *np = VTONFS(vp);
1825191783Srmacklem	u_quad_t tsize;
1826231330Srmacklem	int biosize = vp->v_bufobj.bo_bsize;
1827191783Srmacklem	int error = 0;
1828191783Srmacklem
1829191783Srmacklem	mtx_lock(&np->n_mtx);
1830191783Srmacklem	tsize = np->n_size;
1831191783Srmacklem	np->n_size = nsize;
1832191783Srmacklem	mtx_unlock(&np->n_mtx);
1833191783Srmacklem
1834191783Srmacklem	if (nsize < tsize) {
1835191783Srmacklem		struct buf *bp;
1836191783Srmacklem		daddr_t lbn;
1837191783Srmacklem		int bufsize;
1838191783Srmacklem
1839191783Srmacklem		/*
1840191783Srmacklem		 * vtruncbuf() doesn't get the buffer overlapping the
1841191783Srmacklem		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
1842191783Srmacklem		 * buffer that now needs to be truncated.
1843191783Srmacklem		 */
1844191783Srmacklem		error = vtruncbuf(vp, cred, td, nsize, biosize);
1845191783Srmacklem		lbn = nsize / biosize;
1846191783Srmacklem		bufsize = nsize & (biosize - 1);
1847191783Srmacklem		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1848191783Srmacklem 		if (!bp)
1849191783Srmacklem			return (EINTR);
1850191783Srmacklem		if (bp->b_dirtyoff > bp->b_bcount)
1851191783Srmacklem			bp->b_dirtyoff = bp->b_bcount;
1852191783Srmacklem		if (bp->b_dirtyend > bp->b_bcount)
1853191783Srmacklem			bp->b_dirtyend = bp->b_bcount;
1854191783Srmacklem		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
1855191783Srmacklem		brelse(bp);
1856191783Srmacklem	} else {
1857191783Srmacklem		vnode_pager_setsize(vp, nsize);
1858191783Srmacklem	}
1859191783Srmacklem	return (error);
1860191783Srmacklem}
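/*
 * Worked example for the truncation above (hypothetical numbers): with
 * biosize = 32768 (a power of two, which the "& (biosize - 1)" mask relies
 * on) and nsize = 100000, lbn = 100000 / 32768 = 3 and
 * bufsize = 100000 & 32767 = 1696, so the block straddling the new EOF is
 * fetched at its truncated size, clipped, and released with B_RELBUF.
 */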
1861191783Srmacklem
1862